/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

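/* The NOP encodings mentioned above are per-chip; they show up below as
 * the upper_nop/lower_nop fields of each sparc_pmu description.
 */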
#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu.  */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register on this cpu.  */
	u64			pcr;

	/* Enabled/disabled state.  */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long.  */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
	unsigned int			flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
	int				max_hw_events;
};

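/* Per-chip descriptions follow.  Each chip provides a map for the
 * generic hardware events, a map for the generic cache events, and a
 * sparc_pmu telling how event encodings are placed into the %pcr fields.
 */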
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

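/* Program a single counter's event field in the software %pcr copy and
 * push it out to the hardware: the real event encoding when enabling,
 * the chip's NOP encoding when disabling, leaving the other counter's
 * field untouched.
 */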
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write_pcr(0, cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write_pcr(0, cpuc->pcr);
}

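/* Both 32-bit counters live in the one 64-bit %pic register, so a read
 * selects the requested half and a write is a read-modify-write that
 * preserves the other counter.
 */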
static u32 read_pmc(int idx)
{
	u64 val;

	val = pcr_ops->read_pic(0);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	pic = pcr_ops->read_pic(0);
	pic &= ~mask;
	pic |= val;
	pcr_ops->write_pic(0, pic);
}

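/* Fold the raw hardware counter value into the generic event counts.
 * The cmpxchg loop copes with an NMI updating prev_count concurrently.
 */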
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			pcr |= nop_for_index(idx);
		else
			pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

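/* pmu_{enable,disable} callbacks.  Disabling clears the context enable
 * bits in %pcr; enabling re-applies them after folding in any counter
 * reprogramming deferred while the PMU was disabled.
 */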
static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write_pcr(0, cpuc->pcr);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write_pcr(0, cpuc->pcr);
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

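/* Start or stop an already-added event on its assigned counter index. */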
static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		sparc_perf_event_set_period(event, &event->hw, idx);
	}

	event->hw.state = 0;

	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

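/* The counter hardware is shared with the NMI watchdog, so the watchdog
 * is stopped on all cpus while any perf event exists and restarted once
 * the last one goes away.
 */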
static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read_pcr(0);
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

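/* Decode a PERF_TYPE_HW_CACHE config value, laid out as
 * "type | (op << 8) | (result << 16)", into the chip's cache map entry.
 */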
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > sparc_pmu->max_hw_events)
		return -1;

	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
		int i;

		for (i = 0; i < n_ev; i++)
			evts[i]->hw.idx = i;
		return 0;
	}

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK.  */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK.  */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict.  */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}

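/* On chips where the two counters share one set of user/supervisor/
 * hypervisor enable bits, every scheduled event must agree on its
 * exclude_* settings.
 */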
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

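/* pmu ->add() callback: queue the event on this cpu, deferring the
 * constraint checks to ->commit_txn() when inside a group transaction.
 */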
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= sparc_pmu->max_hw_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	event->hw.state = PERF_HES_UPTODATE;
	if (!(ef_flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

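/* pmu ->event_init() callback: translate the generic attributes into a
 * %pcr encoding and validate the event against the rest of its group.
 */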
static int sparc_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
		break;

	case PERF_TYPE_RAW:
		pmap = NULL;
		break;

	default:
		return -ENOENT;

	}

	if (pmap) {
		hwc->event_base = perf_event_encode(pmap);
	} else {
		/*
		 * User gives us "(encoding << 16) | pic_mask" for
		 * PERF_TYPE_RAW events.
		 */
		hwc->event_base = attr->config;
	}

	/* We save the enable bits in the config_base.  */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   sparc_pmu->max_hw_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

static struct pmu pmu = {
	.pmu_enable	= sparc_pmu_enable,
	.pmu_disable	= sparc_pmu_disable,
	.event_init	= sparc_pmu_event_init,
	.add		= sparc_pmu_add,
	.del		= sparc_pmu_del,
	.start		= sparc_pmu_start,
	.stop		= sparc_pmu_stop,
	.read		= sparc_pmu_read,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read_pcr(0);
	pic = pcr_ops->read_pic(0);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write_pcr(0, cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			sparc_pmu_stop(event, 0);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

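/* Select the sparc_pmu description matching the cpu type probed at boot. */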
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2") ||
	    !strcmp(sparc_pmu_type, "niagara3")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return 0;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	register_die_notifier(&perf_event_nmi_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

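/* User-space callchain sampling: walk the saved frame pointers, with one
 * flavor for 64-bit tasks and one for 32-bit compat tasks.
 */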
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	flushw_user();
	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);
}