/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register. The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits. But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters. It's thus nearly impossible to have one counter going
 * while keeping the other one stopped. Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events. These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWEVENTS		2
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1
#define PIC_NO_INDEX		-1

struct cpu_hw_events {
        /* Number of events currently scheduled onto this cpu.
         * This tells how many entries in the arrays below
         * are valid.
         */
        int n_events;

        /* Number of new events added since the last hw_perf_disable().
         * This works because the perf event layer always adds new
         * events inside of a perf_{disable,enable}() sequence.
         */
        int n_added;

        /* Array of events currently scheduled on this cpu. */
        struct perf_event *event[MAX_HWEVENTS];

        /* Array of encoded longs, specifying the %pcr register
         * encoding and the mask of PIC counters this event can
         * be scheduled on. See perf_event_encode() et al.
         */
        unsigned long events[MAX_HWEVENTS];

        /* The current counter index assigned to an event. When the
         * event hasn't been programmed into the cpu yet, this will
         * hold PIC_NO_INDEX. The event->hw.idx value tells us where
         * we ought to schedule the event.
         */
        int current_idx[MAX_HWEVENTS];

        /* Software copy of %pcr register on this cpu. */
        u64 pcr;

        /* Enabled/disabled state. */
        int enabled;

        unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event. In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
        u16 encoding;
        u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
        return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
        return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
        return val >> 16;
}
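
/* Worked example (illustrative, values taken from the ultra3 cache map
 * below): the L1D read-miss entry { 0x09, PIC_UPPER } encodes to
 * perf_event_encode() == (0x09 << 16) | 0x01 == 0x00090001, from which
 * perf_event_get_enc() recovers 0x09 and perf_event_get_msk() recovers
 * PIC_UPPER (0x01).
 */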

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
        const struct perf_event_map *(*event_map)(int);
        const cache_map_t *cache_map;
        int max_events;
        u32 (*read_pmc)(int);
        void (*write_pmc)(int, u64);
        int upper_shift;
        int lower_shift;
        int event_mask;
        int user_bit;
        int priv_bit;
        int hv_bit;
        int irq_bit;
        int upper_nop;
        int lower_nop;
        unsigned int flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
        int max_hw_events;
};

static u32 sparc_default_read_pmc(int idx)
{
        u64 val;

        val = pcr_ops->read_pic(0);
        if (idx == PIC_UPPER_INDEX)
                val >>= 32;

        return val & 0xffffffff;
}

static void sparc_default_write_pmc(int idx, u64 val)
{
        u64 shift, mask, pic;

        shift = 0;
        if (idx == PIC_UPPER_INDEX)
                shift = 32;

        mask = ((u64) 0xffffffff) << shift;
        val <<= shift;

        pic = pcr_ops->read_pic(0);
        pic &= ~mask;
        pic |= val;
        pcr_ops->write_pic(0, pic);
}
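
/* Illustrative example: writing 0x12345678 to the upper counter
 * (idx == PIC_UPPER_INDEX) uses shift == 32, so only bits 63:32 of the
 * 64-bit PIC register are replaced, and the lower counter's value in
 * bits 31:0 is preserved by the read-modify-write sequence above.
 */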

static const struct perf_event_map ultra3_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
        return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
                [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
                [C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu ultra3_pmu = {
        .event_map = ultra3_event_map,
        .cache_map = &ultra3_cache_map,
        .max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
        .read_pmc = sparc_default_read_pmc,
        .write_pmc = sparc_default_write_pmc,
        .upper_shift = 11,
        .lower_shift = 4,
        .event_mask = 0x3f,
        .user_bit = PCR_UTRACE,
        .priv_bit = PCR_STRACE,
        .upper_nop = 0x1c,
        .lower_nop = 0x14,
        .flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
                  SPARC_PMU_HAS_CONFLICTS),
        .max_hw_events = 2,
};

/* Niagara1 is very limited. The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems. Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
        return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
                [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
                [C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu niagara1_pmu = {
        .event_map = niagara1_event_map,
        .cache_map = &niagara1_cache_map,
        .max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
        .read_pmc = sparc_default_read_pmc,
        .write_pmc = sparc_default_write_pmc,
        .upper_shift = 0,
        .lower_shift = 4,
        .event_mask = 0x7,
        .user_bit = PCR_UTRACE,
        .priv_bit = PCR_STRACE,
        .upper_nop = 0x0,
        .lower_nop = 0x0,
        .flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
                  SPARC_PMU_HAS_CONFLICTS),
        .max_hw_events = 2,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
        return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
                [C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu niagara2_pmu = {
        .event_map = niagara2_event_map,
        .cache_map = &niagara2_cache_map,
        .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
        .read_pmc = sparc_default_read_pmc,
        .write_pmc = sparc_default_write_pmc,
        .upper_shift = 19,
        .lower_shift = 6,
        .event_mask = 0xfff,
        .user_bit = PCR_UTRACE,
        .priv_bit = PCR_STRACE,
        .hv_bit = PCR_N2_HTRACE,
        .irq_bit = 0x30,
        .upper_nop = 0x220,
        .lower_nop = 0x220,
        .flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
                  SPARC_PMU_HAS_CONFLICTS),
        .max_hw_events = 2,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
        if (idx == PIC_UPPER_INDEX)
                event_id <<= sparc_pmu->upper_shift;
        else
                event_id <<= sparc_pmu->lower_shift;
        return event_id;
}

static u64 mask_for_index(int idx)
{
        return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
        return event_encoding(idx == PIC_UPPER_INDEX ?
                              sparc_pmu->upper_nop :
                              sparc_pmu->lower_nop, idx);
}
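
/* Illustrative example using the ultra3 parameters above (upper_shift == 11,
 * event_mask == 0x3f, upper_nop == 0x1c): mask_for_index(PIC_UPPER_INDEX)
 * yields 0x3f << 11, the %pcr bits holding the upper counter's event field,
 * and nop_for_index(PIC_UPPER_INDEX) yields 0x1c << 11, the "software count"
 * event used to park that counter while it is inactive.
 */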

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
        u64 val, mask = mask_for_index(idx);

        val = cpuc->pcr;
        val &= ~mask;
        val |= hwc->config;
        cpuc->pcr = val;

        pcr_ops->write_pcr(0, cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
        u64 mask = mask_for_index(idx);
        u64 nop = nop_for_index(idx);
        u64 val;

        val = cpuc->pcr;
        val &= ~mask;
        val |= nop;
        cpuc->pcr = val;

        pcr_ops->write_pcr(0, cpuc->pcr);
}

static u64 sparc_perf_event_update(struct perf_event *event,
                                   struct hw_perf_event *hwc, int idx)
{
        int shift = 64 - 32;
        u64 prev_raw_count, new_raw_count;
        s64 delta;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = sparc_pmu->read_pmc(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
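
/* Worked example of the delta computation above: if the 32-bit counter moved
 * from prev_raw_count == 0xfffffff0 to new_raw_count == 0x10 (i.e. it
 * wrapped), shifting both values left by 32 before subtracting and then
 * arithmetically shifting back right by 32 gives delta == 0x20, the 32
 * events that actually occurred, instead of a huge negative value.
 */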

static int sparc_perf_event_set_period(struct perf_event *event,
                                       struct hw_perf_event *hwc, int idx)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        if (left > MAX_PERIOD)
                left = MAX_PERIOD;

        local64_set(&hwc->prev_count, (u64)-left);

        sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}
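
/* For example (illustrative): with a sample_period of 10000 and a full
 * period left, the counter is programmed with (u64)(-10000) & 0xffffffff,
 * i.e. 0xffffd8f0, so the 32-bit PIC overflows and raises its interrupt
 * after exactly 10000 more counted events.
 */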

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
        int i;

        if (!cpuc->n_added)
                goto out;

        /* Read in the counters which are moving. */
        for (i = 0; i < cpuc->n_events; i++) {
                struct perf_event *cp = cpuc->event[i];

                if (cpuc->current_idx[i] != PIC_NO_INDEX &&
                    cpuc->current_idx[i] != cp->hw.idx) {
                        sparc_perf_event_update(cp, &cp->hw,
                                                cpuc->current_idx[i]);
                        cpuc->current_idx[i] = PIC_NO_INDEX;
                }
        }

        /* Assign to counters all unassigned events. */
        for (i = 0; i < cpuc->n_events; i++) {
                struct perf_event *cp = cpuc->event[i];
                struct hw_perf_event *hwc = &cp->hw;
                int idx = hwc->idx;
                u64 enc;

                if (cpuc->current_idx[i] != PIC_NO_INDEX)
                        continue;

                sparc_perf_event_set_period(cp, hwc, idx);
                cpuc->current_idx[i] = idx;

                enc = perf_event_get_enc(cpuc->events[i]);
                pcr &= ~mask_for_index(idx);
                if (hwc->state & PERF_HES_STOPPED)
                        pcr |= nop_for_index(idx);
                else
                        pcr |= event_encoding(enc, idx);
        }
out:
        return pcr;
}

static void sparc_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 pcr;

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        pcr = cpuc->pcr;
        if (!cpuc->n_events) {
                pcr = 0;
        } else {
                pcr = maybe_change_configuration(cpuc, pcr);

                /* We require that all of the events have the same
                 * configuration, so just fetch the settings from the
                 * first entry.
                 */
                cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
        }

        pcr_ops->write_pcr(0, cpuc->pcr);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;
        cpuc->n_added = 0;

        val = cpuc->pcr;
        val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
                 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
        cpuc->pcr = val;

        pcr_ops->write_pcr(0, cpuc->pcr);
}

static int active_event_index(struct cpu_hw_events *cpuc,
                              struct perf_event *event)
{
        int i;

        for (i = 0; i < cpuc->n_events; i++) {
                if (cpuc->event[i] == event)
                        break;
        }
        BUG_ON(i == cpuc->n_events);
        return cpuc->current_idx[i];
}

static void sparc_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = active_event_index(cpuc, event);

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                sparc_perf_event_set_period(event, &event->hw, idx);
        }

        event->hw.state = 0;

        sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = active_event_index(cpuc, event);

        if (!(event->hw.state & PERF_HES_STOPPED)) {
                sparc_pmu_disable_event(cpuc, &event->hw, idx);
                event->hw.state |= PERF_HES_STOPPED;
        }

        if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
                sparc_perf_event_update(event, &event->hw, idx);
                event->hw.state |= PERF_HES_UPTODATE;
        }
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long flags;
        int i;

        local_irq_save(flags);
        perf_pmu_disable(event->pmu);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event[i]) {
                        /* Absorb the final count and turn off the
                         * event.
                         */
                        sparc_pmu_stop(event, PERF_EF_UPDATE);

                        /* Shift remaining entries down into
                         * the existing slot.
                         */
                        while (++i < cpuc->n_events) {
                                cpuc->event[i - 1] = cpuc->event[i];
                                cpuc->events[i - 1] = cpuc->events[i];
                                cpuc->current_idx[i - 1] =
                                        cpuc->current_idx[i];
                        }

                        perf_event_update_userpage(event);

                        cpuc->n_events--;
                        break;
                }
        }

        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = active_event_index(cpuc, event);
        struct hw_perf_event *hwc = &event->hw;

        sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        stop_nmi_watchdog(NULL);
        cpuc->pcr = pcr_ops->read_pcr(0);
}

void perf_event_grab_pmc(void)
{
        if (atomic_inc_not_zero(&active_events))
                return;

        mutex_lock(&pmc_grab_mutex);
        if (atomic_read(&active_events) == 0) {
                if (atomic_read(&nmi_active) > 0) {
                        on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
                        BUG_ON(atomic_read(&nmi_active) != 0);
                }
                atomic_inc(&active_events);
        }
        mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
                if (atomic_read(&nmi_active) == 0)
                        on_each_cpu(start_nmi_watchdog, NULL, 1);
                mutex_unlock(&pmc_grab_mutex);
        }
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        const struct perf_event_map *pmap;

        if (!sparc_pmu->cache_map)
                return ERR_PTR(-ENOENT);

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return ERR_PTR(-EINVAL);

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return ERR_PTR(-EINVAL);

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return ERR_PTR(-EINVAL);

        pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

        if (pmap->encoding == CACHE_OP_UNSUPPORTED)
                return ERR_PTR(-ENOENT);

        if (pmap->encoding == CACHE_OP_NONSENSE)
                return ERR_PTR(-EINVAL);

        return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
        perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time. This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success. These are pending indexes. When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
                                   unsigned long *events, int n_ev)
{
        u8 msk0 = 0, msk1 = 0;
        int idx0 = 0;

        /* This case is possible when we are invoked from
         * hw_perf_group_sched_in().
         */
        if (!n_ev)
                return 0;

        if (n_ev > sparc_pmu->max_hw_events)
                return -1;

        if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
                int i;

                for (i = 0; i < n_ev; i++)
                        evts[i]->hw.idx = i;
                return 0;
        }

        msk0 = perf_event_get_msk(events[0]);
        if (n_ev == 1) {
                if (msk0 & PIC_LOWER)
                        idx0 = 1;
                goto success;
        }
        BUG_ON(n_ev != 2);
        msk1 = perf_event_get_msk(events[1]);

        /* If both events can go on any counter, OK. */
        if (msk0 == (PIC_UPPER | PIC_LOWER) &&
            msk1 == (PIC_UPPER | PIC_LOWER))
                goto success;

        /* If one event is limited to a specific counter,
         * and the other can go on both, OK.
         */
        if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
            msk1 == (PIC_UPPER | PIC_LOWER)) {
                if (msk0 & PIC_LOWER)
                        idx0 = 1;
                goto success;
        }

        if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
            msk0 == (PIC_UPPER | PIC_LOWER)) {
                if (msk1 & PIC_UPPER)
                        idx0 = 1;
                goto success;
        }

        /* If the events are fixed to different counters, OK. */
        if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
            (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
                if (msk0 & PIC_LOWER)
                        idx0 = 1;
                goto success;
        }

        /* Otherwise, there is a conflict. */
        return -1;

success:
        evts[0]->hw.idx = idx0;
        if (n_ev == 2)
                evts[1]->hw.idx = idx0 ^ 1;
        return 0;
}
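
/* Illustrative example using the ultra3 event map above: cache references
 * ({ 0x0009, PIC_LOWER }) and cache misses ({ 0x0009, PIC_UPPER }) are each
 * fixed to a different counter, so the "fixed to different counters" case
 * applies and the pair is accepted, with the references event assigned
 * idx 1 (PIC_LOWER_INDEX) and the misses event idx 0 (PIC_UPPER_INDEX).
 */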

static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
        int eu = 0, ek = 0, eh = 0;
        struct perf_event *event;
        int i, n, first;

        if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
                return 0;

        n = n_prev + n_new;
        if (n <= 1)
                return 0;

        first = 1;
        for (i = 0; i < n; i++) {
                event = evts[i];
                if (first) {
                        eu = event->attr.exclude_user;
                        ek = event->attr.exclude_kernel;
                        eh = event->attr.exclude_hv;
                        first = 0;
                } else if (event->attr.exclude_user != eu ||
                           event->attr.exclude_kernel != ek ||
                           event->attr.exclude_hv != eh) {
                        return -EAGAIN;
                }
        }

        return 0;
}

static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *evts[], unsigned long *events,
                          int *current_idx)
{
        struct perf_event *event;
        int n = 0;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                evts[n] = group;
                events[n] = group->hw.event_base;
                current_idx[n++] = PIC_NO_INDEX;
        }
        list_for_each_entry(event, &group->sibling_list, group_entry) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        evts[n] = event;
                        events[n] = event->hw.event_base;
                        current_idx[n++] = PIC_NO_INDEX;
                }
        }
        return n;
}

static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n0, ret = -EAGAIN;
        unsigned long flags;

        local_irq_save(flags);
        perf_pmu_disable(event->pmu);

        n0 = cpuc->n_events;
        if (n0 >= sparc_pmu->max_hw_events)
                goto out;

        cpuc->event[n0] = event;
        cpuc->events[n0] = event->hw.event_base;
        cpuc->current_idx[n0] = PIC_NO_INDEX;

        event->hw.state = PERF_HES_UPTODATE;
        if (!(ef_flags & PERF_EF_START))
                event->hw.state |= PERF_HES_STOPPED;

        /*
         * If a group event scheduling transaction was started,
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto nocheck;

        if (check_excludes(cpuc->event, n0, 1))
                goto out;
        if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
                goto out;

nocheck:
        cpuc->n_events++;
        cpuc->n_added++;

        ret = 0;
out:
        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
}

static int sparc_pmu_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct perf_event *evts[MAX_HWEVENTS];
        struct hw_perf_event *hwc = &event->hw;
        unsigned long events[MAX_HWEVENTS];
        int current_idx_dmy[MAX_HWEVENTS];
        const struct perf_event_map *pmap;
        int n;

        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        switch (attr->type) {
        case PERF_TYPE_HARDWARE:
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
                pmap = sparc_pmu->event_map(attr->config);
                break;

        case PERF_TYPE_HW_CACHE:
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
                break;

        case PERF_TYPE_RAW:
                pmap = NULL;
                break;

        default:
                return -ENOENT;

        }

        if (pmap) {
                hwc->event_base = perf_event_encode(pmap);
        } else {
                /*
                 * User gives us "(encoding << 16) | pic_mask" for
                 * PERF_TYPE_RAW events.
                 */
                hwc->event_base = attr->config;
        }

        /* We save the enable bits in the config_base. */
        hwc->config_base = sparc_pmu->irq_bit;
        if (!attr->exclude_user)
                hwc->config_base |= sparc_pmu->user_bit;
        if (!attr->exclude_kernel)
                hwc->config_base |= sparc_pmu->priv_bit;
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;

        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   sparc_pmu->max_hw_events - 1,
                                   evts, events, current_idx_dmy);
                if (n < 0)
                        return -EINVAL;
        }
        events[n] = hwc->event_base;
        evts[n] = event;

        if (check_excludes(evts, n, 1))
                return -EINVAL;

        if (sparc_check_constraints(evts, events, n + 1))
                return -EINVAL;

        hwc->idx = PIC_NO_INDEX;

        /* Try to do all error checking before this point, as unwinding
         * state after grabbing the PMC is difficult.
         */
        perf_event_grab_pmc();
        event->destroy = hw_perf_event_destroy;

        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        return 0;
}

/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time.
 */
static void sparc_pmu_start_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

        perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

        cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Return 0 on success.
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n;

        if (!sparc_pmu)
                return -EINVAL;

        cpuc = &__get_cpu_var(cpu_hw_events);
        n = cpuc->n_events;
        if (check_excludes(cpuc->event, 0, n))
                return -EINVAL;
        if (sparc_check_constraints(cpuc->event, cpuc->events, n))
                return -EAGAIN;

        cpuc->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
        return 0;
}

static struct pmu pmu = {
        .pmu_enable	= sparc_pmu_enable,
        .pmu_disable	= sparc_pmu_disable,
        .event_init	= sparc_pmu_event_init,
        .add		= sparc_pmu_add,
        .del		= sparc_pmu_del,
        .start		= sparc_pmu_start,
        .stop		= sparc_pmu_stop,
        .read		= sparc_pmu_read,
        .start_txn	= sparc_pmu_start_txn,
        .cancel_txn	= sparc_pmu_cancel_txn,
        .commit_txn	= sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
        unsigned long flags;
        u64 pcr, pic;
        int cpu;

        if (!sparc_pmu)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();

        pcr = pcr_ops->read_pcr(0);
        pic = pcr_ops->read_pic(0);

        pr_info("\n");
        pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
                cpu, pcr, pic);

        local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                                            unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
        int i;

        if (!atomic_read(&active_events))
                return NOTIFY_DONE;

        switch (cmd) {
        case DIE_NMI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

        cpuc = &__get_cpu_var(cpu_hw_events);

        /* If the PMU has the TOE IRQ enable bits, we need to do a
         * dummy write to the %pcr to clear the overflow bits and thus
         * the interrupt.
         *
         * Do this before we peek at the counters to determine
         * overflow so we don't lose any events.
         */
        if (sparc_pmu->irq_bit)
                pcr_ops->write_pcr(0, cpuc->pcr);

        for (i = 0; i < cpuc->n_events; i++) {
                struct perf_event *event = cpuc->event[i];
                int idx = cpuc->current_idx[i];
                struct hw_perf_event *hwc;
                u64 val;

                hwc = &event->hw;
                val = sparc_perf_event_update(event, hwc, idx);
                if (val & (1ULL << 31))
                        continue;

                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!sparc_perf_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        sparc_pmu_stop(event, 0);
        }

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
        .notifier_call	= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
        if (!strcmp(sparc_pmu_type, "ultra3") ||
            !strcmp(sparc_pmu_type, "ultra3+") ||
            !strcmp(sparc_pmu_type, "ultra3i") ||
            !strcmp(sparc_pmu_type, "ultra4+")) {
                sparc_pmu = &ultra3_pmu;
                return true;
        }
        if (!strcmp(sparc_pmu_type, "niagara")) {
                sparc_pmu = &niagara1_pmu;
                return true;
        }
        if (!strcmp(sparc_pmu_type, "niagara2") ||
            !strcmp(sparc_pmu_type, "niagara3")) {
                sparc_pmu = &niagara2_pmu;
                return true;
        }
        return false;
}

int __init init_hw_perf_events(void)
{
        pr_info("Performance events: ");

        if (!supported_pmu()) {
                pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
                return 0;
        }

        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        register_die_notifier(&perf_event_nmi_notifier);

        return 0;
}
early_initcall(init_hw_perf_events);

void perf_callchain_kernel(struct perf_callchain_entry *entry,
                           struct pt_regs *regs)
{
        unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int graph = 0;
#endif

        stack_trace_flush();

        perf_callchain_store(entry, regs->tpc);

        ksp = regs->u_regs[UREG_I6];
        fp = ksp + STACK_BIAS;
        do {
                struct sparc_stackf *sf;
                struct pt_regs *regs;
                unsigned long pc;

                if (!kstack_valid(current_thread_info(), fp))
                        break;

                sf = (struct sparc_stackf *) fp;
                regs = (struct pt_regs *) (sf + 1);

                if (kstack_is_trap_frame(current_thread_info(), regs)) {
                        if (user_mode(regs))
                                break;
                        pc = regs->tpc;
                        fp = regs->u_regs[UREG_I6] + STACK_BIAS;
                } else {
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }
                perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                        int index = current->curr_ret_stack;
                        if (current->ret_stack && index >= graph) {
                                pc = current->ret_stack[index - graph].ret;
                                perf_callchain_store(entry, pc);
                                graph++;
                        }
                }
#endif
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct perf_callchain_entry *entry,
                                   struct pt_regs *regs)
{
        unsigned long ufp;

        perf_callchain_store(entry, regs->tpc);

        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
                struct sparc_stackf *usf, sf;
                unsigned long pc;

                usf = (struct sparc_stackf *) ufp;
                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                        break;

                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp + STACK_BIAS;
                perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                                   struct pt_regs *regs)
{
        unsigned long ufp;

        perf_callchain_store(entry, regs->tpc);

        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
        do {
                struct sparc_stackf32 *usf, sf;
                unsigned long pc;

                usf = (struct sparc_stackf32 *) ufp;
                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                        break;

                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp;
                perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        flushw_user();
        if (test_thread_flag(TIF_32BIT))
                perf_callchain_user_32(entry, regs);
        else
                perf_callchain_user_64(entry, regs);
}