Ingo Molnarcdd6c482009-09-21 12:02:48 +02001/* Performance event support for sparc64.
David S. Miller59abbd12009-09-10 06:28:20 -07002 *
David S. Miller4f6dbe42010-01-19 00:26:13 -08003 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
David S. Miller59abbd12009-09-10 06:28:20 -07004 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005 * This code is based almost entirely upon the x86 perf event
David S. Miller59abbd12009-09-10 06:28:20 -07006 * code, which is:
7 *
8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2009 Jaswinder Singh Rajput
11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
13 */
14
Ingo Molnarcdd6c482009-09-21 12:02:48 +020015#include <linux/perf_event.h>
David S. Miller59abbd12009-09-10 06:28:20 -070016#include <linux/kprobes.h>
17#include <linux/kernel.h>
18#include <linux/kdebug.h>
19#include <linux/mutex.h>
20
David S. Miller4f6dbe42010-01-19 00:26:13 -080021#include <asm/stacktrace.h>
David S. Miller59abbd12009-09-10 06:28:20 -070022#include <asm/cpudata.h>
David S. Miller4f6dbe42010-01-19 00:26:13 -080023#include <asm/uaccess.h>
David S. Miller59abbd12009-09-10 06:28:20 -070024#include <asm/atomic.h>
25#include <asm/nmi.h>
26#include <asm/pcr.h>
27
David S. Miller4f6dbe42010-01-19 00:26:13 -080028#include "kstack.h"
29
David S. Miller59abbd12009-09-10 06:28:20 -070030/* Sparc64 chips have two performance counters, 32 bits each, with
31 * overflow interrupts generated on transition from 0xffffffff to 0.
32 * The counters are accessed in one go using a 64-bit register.
33 *
34 * Both counters are controlled using a single control register. The
35 * only way to stop all sampling is to clear all of the context (user,
36 * supervisor, hypervisor) sampling enable bits. But these bits apply
37 * to both counters, thus the two counters can't be enabled/disabled
38 * individually.
39 *
40 * The control register has two event fields, one for each of the two
41 * counters. It's thus nearly impossible to have one counter going
42 * while keeping the other one stopped. Therefore it is possible to
43 * get overflow interrupts for counters not currently "in use" and
44 * that condition must be checked in the overflow interrupt handler.
45 *
46 * So we use a hack, in that we program inactive counters with the
47 * "sw_count0" and "sw_count1" events. These count how many times
48 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
49 * unusual way to encode a NOP and therefore will not trigger in
50 * normal code.
51 */
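/* To make the layout concrete (an illustrative sketch, using the ultra3
 * values defined further below): the lower counter's event code sits at
 * bit 4 of the %pcr and the upper counter's at bit 11, while the
 * user/supervisor/hypervisor trace-enable bits are shared by both
 * counters.  The %pic is read and written as one 64-bit value whose low
 * and high 32-bit halves are the lower and upper counters respectively.
 */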
52
Ingo Molnarcdd6c482009-09-21 12:02:48 +020053#define MAX_HWEVENTS 2
David S. Miller59abbd12009-09-10 06:28:20 -070054#define MAX_PERIOD ((1UL << 32) - 1)
55
56#define PIC_UPPER_INDEX 0
57#define PIC_LOWER_INDEX 1
David S. Millere7bef6b2010-01-20 02:59:47 -080058#define PIC_NO_INDEX -1
David S. Miller59abbd12009-09-10 06:28:20 -070059
Ingo Molnarcdd6c482009-09-21 12:02:48 +020060struct cpu_hw_events {
David S. Millere7bef6b2010-01-20 02:59:47 -080061 /* Number of events currently scheduled onto this cpu.
62 * This tells how many entries in the arrays below
63 * are valid.
64 */
65 int n_events;
66
67 /* Number of new events added since the last hw_perf_disable().
68 * This works because the perf event layer always adds new
69 * events inside of a perf_{disable,enable}() sequence.
70 */
71 int n_added;
72
 73	/* Array of events currently scheduled on this cpu. */
74 struct perf_event *event[MAX_HWEVENTS];
75
76 /* Array of encoded longs, specifying the %pcr register
 77	 * encoding and the mask of PIC counters this event can
78 * be scheduled on. See perf_event_encode() et al.
79 */
80 unsigned long events[MAX_HWEVENTS];
81
82 /* The current counter index assigned to an event. When the
83 * event hasn't been programmed into the cpu yet, this will
84 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
85 * we ought to schedule the event.
86 */
87 int current_idx[MAX_HWEVENTS];
88
89 /* Software copy of %pcr register on this cpu. */
David S. Millerd1751382009-09-29 21:27:06 -070090 u64 pcr;
David S. Millere7bef6b2010-01-20 02:59:47 -080091
 92	/* Enabled/disabled state. */
David S. Millerd1751382009-09-29 21:27:06 -070093 int enabled;
Lin Minga13c3af2010-04-23 13:56:33 +080094
95 unsigned int group_flag;
David S. Miller59abbd12009-09-10 06:28:20 -070096};
Ingo Molnarcdd6c482009-09-21 12:02:48 +020097DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
David S. Miller59abbd12009-09-10 06:28:20 -070098
David S. Millere7bef6b2010-01-20 02:59:47 -080099/* An event map describes the characteristics of a performance
100 * counter event. In particular it gives the encoding as well as
101 * a mask telling which counters the event can be measured on.
102 */
David S. Miller59abbd12009-09-10 06:28:20 -0700103struct perf_event_map {
104 u16 encoding;
105 u8 pic_mask;
106#define PIC_NONE 0x00
107#define PIC_UPPER 0x01
108#define PIC_LOWER 0x02
109};
110
David S. Millere7bef6b2010-01-20 02:59:47 -0800111/* Encode a perf_event_map entry into a long. */
David S. Millera72a8a52009-09-28 17:35:20 -0700112static unsigned long perf_event_encode(const struct perf_event_map *pmap)
113{
114 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
115}
116
David S. Millere7bef6b2010-01-20 02:59:47 -0800117static u8 perf_event_get_msk(unsigned long val)
David S. Millera72a8a52009-09-28 17:35:20 -0700118{
David S. Millere7bef6b2010-01-20 02:59:47 -0800119 return val & 0xff;
120}
121
122static u64 perf_event_get_enc(unsigned long val)
123{
124 return val >> 16;
David S. Millera72a8a52009-09-28 17:35:20 -0700125}
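/* Worked example (illustrative only): the Niagara-2 CPU-cycles event
 * defined below uses encoding 0x02ff with pic_mask PIC_UPPER | PIC_LOWER,
 * so perf_event_encode() packs it as (0x02ff << 16) | 0x03 == 0x02ff0003.
 * perf_event_get_enc() and perf_event_get_msk() then recover 0x02ff and
 * 0x03 from that long.
 */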
126
David S. Miller2ce4da22009-09-26 20:42:10 -0700127#define C(x) PERF_COUNT_HW_CACHE_##x
128
129#define CACHE_OP_UNSUPPORTED 0xfffe
130#define CACHE_OP_NONSENSE 0xffff
131
132typedef struct perf_event_map cache_map_t
133 [PERF_COUNT_HW_CACHE_MAX]
134 [PERF_COUNT_HW_CACHE_OP_MAX]
135 [PERF_COUNT_HW_CACHE_RESULT_MAX];
136
David S. Miller59abbd12009-09-10 06:28:20 -0700137struct sparc_pmu {
138 const struct perf_event_map *(*event_map)(int);
David S. Miller2ce4da22009-09-26 20:42:10 -0700139 const cache_map_t *cache_map;
David S. Miller59abbd12009-09-10 06:28:20 -0700140 int max_events;
141 int upper_shift;
142 int lower_shift;
143 int event_mask;
David S. Miller91b92862009-09-10 07:09:06 -0700144 int hv_bit;
David S. Miller496c07e2009-09-10 07:10:59 -0700145 int irq_bit;
David S. Miller660d1372009-09-10 07:13:26 -0700146 int upper_nop;
147 int lower_nop;
David S. Miller59abbd12009-09-10 06:28:20 -0700148};
149
David S. Miller28e8f9b2009-09-26 20:54:22 -0700150static const struct perf_event_map ultra3_perfmon_event_map[] = {
David S. Miller59abbd12009-09-10 06:28:20 -0700151 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
152 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
153 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
154 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
155};
156
David S. Miller28e8f9b2009-09-26 20:54:22 -0700157static const struct perf_event_map *ultra3_event_map(int event_id)
David S. Miller59abbd12009-09-10 06:28:20 -0700158{
David S. Miller28e8f9b2009-09-26 20:54:22 -0700159 return &ultra3_perfmon_event_map[event_id];
David S. Miller59abbd12009-09-10 06:28:20 -0700160}
161
David S. Miller28e8f9b2009-09-26 20:54:22 -0700162static const cache_map_t ultra3_cache_map = {
David S. Miller2ce4da22009-09-26 20:42:10 -0700163[C(L1D)] = {
164 [C(OP_READ)] = {
165 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
166 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
167 },
168 [C(OP_WRITE)] = {
169 [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
170 [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
171 },
172 [C(OP_PREFETCH)] = {
173 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
174 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
175 },
176},
177[C(L1I)] = {
178 [C(OP_READ)] = {
179 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
180 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
181 },
182 [ C(OP_WRITE) ] = {
183 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
184 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
185 },
186 [ C(OP_PREFETCH) ] = {
187 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
188 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
189 },
190},
191[C(LL)] = {
192 [C(OP_READ)] = {
193 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
194 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
195 },
196 [C(OP_WRITE)] = {
197 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
198 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
199 },
200 [C(OP_PREFETCH)] = {
201 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
202 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
203 },
204},
205[C(DTLB)] = {
206 [C(OP_READ)] = {
207 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
208 [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
209 },
210 [ C(OP_WRITE) ] = {
211 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
212 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
213 },
214 [ C(OP_PREFETCH) ] = {
215 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
216 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
217 },
218},
219[C(ITLB)] = {
220 [C(OP_READ)] = {
221 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
222 [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
223 },
224 [ C(OP_WRITE) ] = {
225 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
226 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
227 },
228 [ C(OP_PREFETCH) ] = {
229 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
230 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
231 },
232},
233[C(BPU)] = {
234 [C(OP_READ)] = {
235 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
236 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
237 },
238 [ C(OP_WRITE) ] = {
239 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
240 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
241 },
242 [ C(OP_PREFETCH) ] = {
243 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
244 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
245 },
246},
247};
248
David S. Miller28e8f9b2009-09-26 20:54:22 -0700249static const struct sparc_pmu ultra3_pmu = {
250 .event_map = ultra3_event_map,
251 .cache_map = &ultra3_cache_map,
252 .max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
David S. Miller59abbd12009-09-10 06:28:20 -0700253 .upper_shift = 11,
254 .lower_shift = 4,
255 .event_mask = 0x3f,
David S. Miller660d1372009-09-10 07:13:26 -0700256 .upper_nop = 0x1c,
257 .lower_nop = 0x14,
David S. Miller59abbd12009-09-10 06:28:20 -0700258};
259
David S. Miller7eebda62009-09-26 21:23:41 -0700260/* Niagara1 is very limited. The upper PIC is hard-locked to count
261 * only instructions, so it is free running which creates all kinds of
David S. Miller6e804252009-09-29 15:10:23 -0700262 * problems. Some hardware designs make one wonder if the creator
David S. Miller7eebda62009-09-26 21:23:41 -0700263 * even looked at how this stuff gets used by software.
264 */
265static const struct perf_event_map niagara1_perfmon_event_map[] = {
266 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
267 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
268 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
269 [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
270};
271
272static const struct perf_event_map *niagara1_event_map(int event_id)
273{
274 return &niagara1_perfmon_event_map[event_id];
275}
276
277static const cache_map_t niagara1_cache_map = {
278[C(L1D)] = {
279 [C(OP_READ)] = {
280 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
281 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
282 },
283 [C(OP_WRITE)] = {
284 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
285 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
286 },
287 [C(OP_PREFETCH)] = {
288 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
289 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
290 },
291},
292[C(L1I)] = {
293 [C(OP_READ)] = {
294 [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
295 [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
296 },
297 [ C(OP_WRITE) ] = {
298 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
299 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
300 },
301 [ C(OP_PREFETCH) ] = {
302 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
303 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
304 },
305},
306[C(LL)] = {
307 [C(OP_READ)] = {
308 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
309 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
310 },
311 [C(OP_WRITE)] = {
312 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
313 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
314 },
315 [C(OP_PREFETCH)] = {
316 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
317 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
318 },
319},
320[C(DTLB)] = {
321 [C(OP_READ)] = {
322 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
323 [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
324 },
325 [ C(OP_WRITE) ] = {
326 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
327 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
328 },
329 [ C(OP_PREFETCH) ] = {
330 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
331 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
332 },
333},
334[C(ITLB)] = {
335 [C(OP_READ)] = {
336 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
337 [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
338 },
339 [ C(OP_WRITE) ] = {
340 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
341 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
342 },
343 [ C(OP_PREFETCH) ] = {
344 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
345 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
346 },
347},
348[C(BPU)] = {
349 [C(OP_READ)] = {
350 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
351 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
352 },
353 [ C(OP_WRITE) ] = {
354 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
355 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
356 },
357 [ C(OP_PREFETCH) ] = {
358 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
359 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
360 },
361},
362};
363
364static const struct sparc_pmu niagara1_pmu = {
365 .event_map = niagara1_event_map,
366 .cache_map = &niagara1_cache_map,
367 .max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
368 .upper_shift = 0,
369 .lower_shift = 4,
370 .event_mask = 0x7,
371 .upper_nop = 0x0,
372 .lower_nop = 0x0,
373};
374
David S. Millerb73d8842009-09-10 07:22:18 -0700375static const struct perf_event_map niagara2_perfmon_event_map[] = {
376 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
377 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
378 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
379 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
380 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
381 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
382};
383
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200384static const struct perf_event_map *niagara2_event_map(int event_id)
David S. Millerb73d8842009-09-10 07:22:18 -0700385{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200386 return &niagara2_perfmon_event_map[event_id];
David S. Millerb73d8842009-09-10 07:22:18 -0700387}
388
David S. Millerd0b86482009-09-26 21:04:16 -0700389static const cache_map_t niagara2_cache_map = {
390[C(L1D)] = {
391 [C(OP_READ)] = {
392 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
393 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
394 },
395 [C(OP_WRITE)] = {
396 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
397 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
398 },
399 [C(OP_PREFETCH)] = {
400 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
401 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
402 },
403},
404[C(L1I)] = {
405 [C(OP_READ)] = {
406 [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
407 [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
408 },
409 [ C(OP_WRITE) ] = {
410 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
411 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
412 },
413 [ C(OP_PREFETCH) ] = {
414 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
415 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
416 },
417},
418[C(LL)] = {
419 [C(OP_READ)] = {
420 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
421 [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
422 },
423 [C(OP_WRITE)] = {
424 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
425 [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
426 },
427 [C(OP_PREFETCH)] = {
428 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
429 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
430 },
431},
432[C(DTLB)] = {
433 [C(OP_READ)] = {
434 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
435 [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
436 },
437 [ C(OP_WRITE) ] = {
438 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
439 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
440 },
441 [ C(OP_PREFETCH) ] = {
442 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
443 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
444 },
445},
446[C(ITLB)] = {
447 [C(OP_READ)] = {
448 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
449 [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
450 },
451 [ C(OP_WRITE) ] = {
452 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
453 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
454 },
455 [ C(OP_PREFETCH) ] = {
456 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
457 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
458 },
459},
460[C(BPU)] = {
461 [C(OP_READ)] = {
462 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
463 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
464 },
465 [ C(OP_WRITE) ] = {
466 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
467 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
468 },
469 [ C(OP_PREFETCH) ] = {
470 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
471 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
472 },
473},
474};
475
David S. Millerb73d8842009-09-10 07:22:18 -0700476static const struct sparc_pmu niagara2_pmu = {
477 .event_map = niagara2_event_map,
David S. Millerd0b86482009-09-26 21:04:16 -0700478 .cache_map = &niagara2_cache_map,
David S. Millerb73d8842009-09-10 07:22:18 -0700479 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
480 .upper_shift = 19,
481 .lower_shift = 6,
482 .event_mask = 0xfff,
483 .hv_bit = 0x8,
David S. Millerde23cf32009-10-09 00:42:40 -0700484 .irq_bit = 0x30,
David S. Millerb73d8842009-09-10 07:22:18 -0700485 .upper_nop = 0x220,
486 .lower_nop = 0x220,
487};
488
David S. Miller59abbd12009-09-10 06:28:20 -0700489static const struct sparc_pmu *sparc_pmu __read_mostly;
490
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200491static u64 event_encoding(u64 event_id, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700492{
493 if (idx == PIC_UPPER_INDEX)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200494 event_id <<= sparc_pmu->upper_shift;
David S. Miller59abbd12009-09-10 06:28:20 -0700495 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200496 event_id <<= sparc_pmu->lower_shift;
497 return event_id;
David S. Miller59abbd12009-09-10 06:28:20 -0700498}
499
500static u64 mask_for_index(int idx)
501{
502 return event_encoding(sparc_pmu->event_mask, idx);
503}
504
505static u64 nop_for_index(int idx)
506{
507 return event_encoding(idx == PIC_UPPER_INDEX ?
David S. Miller660d1372009-09-10 07:13:26 -0700508 sparc_pmu->upper_nop :
509 sparc_pmu->lower_nop, idx);
David S. Miller59abbd12009-09-10 06:28:20 -0700510}
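/* For example (a sketch using the ultra3 values below): with event_mask
 * 0x3f, upper_shift 11 and upper_nop 0x1c, mask_for_index(PIC_UPPER_INDEX)
 * is 0x3f << 11 and nop_for_index(PIC_UPPER_INDEX) is 0x1c << 11, i.e. the
 * %pcr field that selects the upper counter's event and the encoding that
 * parks that counter on the do-nothing "sw_count" style event described in
 * the header comment.
 */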
511
David S. Millerd1751382009-09-29 21:27:06 -0700512static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700513{
514 u64 val, mask = mask_for_index(idx);
515
David S. Millerd1751382009-09-29 21:27:06 -0700516 val = cpuc->pcr;
517 val &= ~mask;
518 val |= hwc->config;
519 cpuc->pcr = val;
520
521 pcr_ops->write(cpuc->pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700522}
523
David S. Millerd1751382009-09-29 21:27:06 -0700524static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700525{
526 u64 mask = mask_for_index(idx);
527 u64 nop = nop_for_index(idx);
David S. Millerd1751382009-09-29 21:27:06 -0700528 u64 val;
David S. Miller59abbd12009-09-10 06:28:20 -0700529
David S. Millerd1751382009-09-29 21:27:06 -0700530 val = cpuc->pcr;
531 val &= ~mask;
532 val |= nop;
533 cpuc->pcr = val;
534
535 pcr_ops->write(cpuc->pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700536}
537
David S. Miller59abbd12009-09-10 06:28:20 -0700538static u32 read_pmc(int idx)
539{
540 u64 val;
541
542 read_pic(val);
543 if (idx == PIC_UPPER_INDEX)
544 val >>= 32;
545
546 return val & 0xffffffff;
547}
548
549static void write_pmc(int idx, u64 val)
550{
551 u64 shift, mask, pic;
552
553 shift = 0;
554 if (idx == PIC_UPPER_INDEX)
555 shift = 32;
556
557 mask = ((u64) 0xffffffff) << shift;
558 val <<= shift;
559
560 read_pic(pic);
561 pic &= ~mask;
562 pic |= val;
563 write_pic(pic);
564}
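/* Note (descriptive): read_pmc()/write_pmc() treat %pic as two packed
 * 32-bit counters; e.g. write_pmc(PIC_UPPER_INDEX, val) rewrites bits
 * 63:32 while preserving bits 31:0, so updating one counter never
 * disturbs the other.
 */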
565
David S. Millere7bef6b2010-01-20 02:59:47 -0800566static u64 sparc_perf_event_update(struct perf_event *event,
567 struct hw_perf_event *hwc, int idx)
568{
569 int shift = 64 - 32;
570 u64 prev_raw_count, new_raw_count;
571 s64 delta;
572
573again:
574 prev_raw_count = atomic64_read(&hwc->prev_count);
575 new_raw_count = read_pmc(idx);
576
577 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
578 new_raw_count) != prev_raw_count)
579 goto again;
580
581 delta = (new_raw_count << shift) - (prev_raw_count << shift);
582 delta >>= shift;
583
584 atomic64_add(delta, &event->count);
585 atomic64_sub(delta, &hwc->period_left);
586
587 return new_raw_count;
588}
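/* The shift-by-32 dance above folds 32-bit counter wraparound into a
 * signed delta.  Worked example (illustrative): prev_raw_count 0xfffffff0
 * and new_raw_count 0x00000010 give
 * ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 == 0x20, i.e. 32 events,
 * even though the raw counter value went "backwards" across the wrap.
 */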
589
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200590static int sparc_perf_event_set_period(struct perf_event *event,
David S. Millerd29862f2009-09-28 17:37:12 -0700591 struct hw_perf_event *hwc, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700592{
593 s64 left = atomic64_read(&hwc->period_left);
594 s64 period = hwc->sample_period;
595 int ret = 0;
596
597 if (unlikely(left <= -period)) {
598 left = period;
599 atomic64_set(&hwc->period_left, left);
600 hwc->last_period = period;
601 ret = 1;
602 }
603
604 if (unlikely(left <= 0)) {
605 left += period;
606 atomic64_set(&hwc->period_left, left);
607 hwc->last_period = period;
608 ret = 1;
609 }
610 if (left > MAX_PERIOD)
611 left = MAX_PERIOD;
612
613 atomic64_set(&hwc->prev_count, (u64)-left);
614
615 write_pmc(idx, (u64)(-left) & 0xffffffff);
616
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200617 perf_event_update_userpage(event);
David S. Miller59abbd12009-09-10 06:28:20 -0700618
619 return ret;
620}
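/* Example (a sketch): with a sample_period of 1000 the counter is
 * programmed to (u64)-1000, i.e. 0xfffffc18 in the 32-bit PIC field, so
 * the overflow interrupt fires after exactly 1000 more counted events,
 * and hwc->prev_count already reflects that programmed starting point.
 */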
621
David S. Millere7bef6b2010-01-20 02:59:47 -0800622/* If performance event entries have been added, move existing
623 * events around (if necessary) and then assign new entries to
624 * counters.
625 */
626static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
David S. Miller59abbd12009-09-10 06:28:20 -0700627{
David S. Millere7bef6b2010-01-20 02:59:47 -0800628 int i;
David S. Miller59abbd12009-09-10 06:28:20 -0700629
David S. Millere7bef6b2010-01-20 02:59:47 -0800630 if (!cpuc->n_added)
631 goto out;
David S. Miller59abbd12009-09-10 06:28:20 -0700632
David S. Millere7bef6b2010-01-20 02:59:47 -0800633 /* Read in the counters which are moving. */
634 for (i = 0; i < cpuc->n_events; i++) {
635 struct perf_event *cp = cpuc->event[i];
David S. Miller59abbd12009-09-10 06:28:20 -0700636
David S. Millere7bef6b2010-01-20 02:59:47 -0800637 if (cpuc->current_idx[i] != PIC_NO_INDEX &&
638 cpuc->current_idx[i] != cp->hw.idx) {
639 sparc_perf_event_update(cp, &cp->hw,
640 cpuc->current_idx[i]);
641 cpuc->current_idx[i] = PIC_NO_INDEX;
642 }
643 }
David S. Miller59abbd12009-09-10 06:28:20 -0700644
David S. Millere7bef6b2010-01-20 02:59:47 -0800645 /* Assign to counters all unassigned events. */
646 for (i = 0; i < cpuc->n_events; i++) {
647 struct perf_event *cp = cpuc->event[i];
648 struct hw_perf_event *hwc = &cp->hw;
649 int idx = hwc->idx;
650 u64 enc;
651
652 if (cpuc->current_idx[i] != PIC_NO_INDEX)
653 continue;
654
655 sparc_perf_event_set_period(cp, hwc, idx);
656 cpuc->current_idx[i] = idx;
657
658 enc = perf_event_get_enc(cpuc->events[i]);
659 pcr |= event_encoding(enc, idx);
660 }
661out:
662 return pcr;
David S. Miller59abbd12009-09-10 06:28:20 -0700663}
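/* In other words (descriptive): when an event has to migrate between the
 * two counters, its count on the old counter is folded in first via
 * sparc_perf_event_update(), and only then is it given a fresh period and
 * a %pcr encoding for the counter it will actually run on.
 */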
664
David S. Millere7bef6b2010-01-20 02:59:47 -0800665void hw_perf_enable(void)
David S. Miller59abbd12009-09-10 06:28:20 -0700666{
David S. Millere7bef6b2010-01-20 02:59:47 -0800667 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
668 u64 pcr;
David S. Miller59abbd12009-09-10 06:28:20 -0700669
David S. Millere7bef6b2010-01-20 02:59:47 -0800670 if (cpuc->enabled)
671 return;
David S. Miller59abbd12009-09-10 06:28:20 -0700672
David S. Millere7bef6b2010-01-20 02:59:47 -0800673 cpuc->enabled = 1;
674 barrier();
David S. Miller59abbd12009-09-10 06:28:20 -0700675
David S. Millere7bef6b2010-01-20 02:59:47 -0800676 pcr = cpuc->pcr;
677 if (!cpuc->n_events) {
678 pcr = 0;
679 } else {
680 pcr = maybe_change_configuration(cpuc, pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700681
David S. Millere7bef6b2010-01-20 02:59:47 -0800682 /* We require that all of the events have the same
683 * configuration, so just fetch the settings from the
684 * first entry.
685 */
686 cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
687 }
David S. Miller59abbd12009-09-10 06:28:20 -0700688
David S. Millere7bef6b2010-01-20 02:59:47 -0800689 pcr_ops->write(cpuc->pcr);
690}
691
692void hw_perf_disable(void)
693{
694 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
695 u64 val;
696
697 if (!cpuc->enabled)
698 return;
699
700 cpuc->enabled = 0;
701 cpuc->n_added = 0;
702
703 val = cpuc->pcr;
704 val &= ~(PCR_UTRACE | PCR_STRACE |
705 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
706 cpuc->pcr = val;
707
708 pcr_ops->write(cpuc->pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700709}
710
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200711static void sparc_pmu_disable(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -0700712{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200713 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
714 struct hw_perf_event *hwc = &event->hw;
David S. Millere7bef6b2010-01-20 02:59:47 -0800715 unsigned long flags;
716 int i;
David S. Miller59abbd12009-09-10 06:28:20 -0700717
David S. Millere7bef6b2010-01-20 02:59:47 -0800718 local_irq_save(flags);
719 perf_disable();
David S. Miller59abbd12009-09-10 06:28:20 -0700720
David S. Millere7bef6b2010-01-20 02:59:47 -0800721 for (i = 0; i < cpuc->n_events; i++) {
722 if (event == cpuc->event[i]) {
723 int idx = cpuc->current_idx[i];
David S. Miller59abbd12009-09-10 06:28:20 -0700724
David S. Millere7bef6b2010-01-20 02:59:47 -0800725 /* Shift remaining entries down into
726 * the existing slot.
727 */
728 while (++i < cpuc->n_events) {
729 cpuc->event[i - 1] = cpuc->event[i];
730 cpuc->events[i - 1] = cpuc->events[i];
731 cpuc->current_idx[i - 1] =
732 cpuc->current_idx[i];
733 }
David S. Miller59abbd12009-09-10 06:28:20 -0700734
David S. Millere7bef6b2010-01-20 02:59:47 -0800735 /* Absorb the final count and turn off the
736 * event.
737 */
738 sparc_pmu_disable_event(cpuc, hwc, idx);
739 barrier();
740 sparc_perf_event_update(event, hwc, idx);
741
742 perf_event_update_userpage(event);
743
744 cpuc->n_events--;
745 break;
746 }
747 }
748
749 perf_enable();
750 local_irq_restore(flags);
751}
752
753static int active_event_index(struct cpu_hw_events *cpuc,
754 struct perf_event *event)
755{
756 int i;
757
758 for (i = 0; i < cpuc->n_events; i++) {
759 if (cpuc->event[i] == event)
760 break;
761 }
762 BUG_ON(i == cpuc->n_events);
763 return cpuc->current_idx[i];
David S. Miller59abbd12009-09-10 06:28:20 -0700764}
765
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200766static void sparc_pmu_read(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -0700767{
David S. Millere7bef6b2010-01-20 02:59:47 -0800768 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
769 int idx = active_event_index(cpuc, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200770 struct hw_perf_event *hwc = &event->hw;
David S. Millerd1751382009-09-29 21:27:06 -0700771
David S. Millere7bef6b2010-01-20 02:59:47 -0800772 sparc_perf_event_update(event, hwc, idx);
David S. Miller59abbd12009-09-10 06:28:20 -0700773}
774
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200775static void sparc_pmu_unthrottle(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -0700776{
David S. Millerd1751382009-09-29 21:27:06 -0700777 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
David S. Millere7bef6b2010-01-20 02:59:47 -0800778 int idx = active_event_index(cpuc, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200779 struct hw_perf_event *hwc = &event->hw;
David S. Millerd1751382009-09-29 21:27:06 -0700780
David S. Millere7bef6b2010-01-20 02:59:47 -0800781 sparc_pmu_enable_event(cpuc, hwc, idx);
David S. Miller59abbd12009-09-10 06:28:20 -0700782}
783
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200784static atomic_t active_events = ATOMIC_INIT(0);
David S. Miller59abbd12009-09-10 06:28:20 -0700785static DEFINE_MUTEX(pmc_grab_mutex);
786
David S. Millerd1751382009-09-29 21:27:06 -0700787static void perf_stop_nmi_watchdog(void *unused)
788{
789 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
790
791 stop_nmi_watchdog(NULL);
792 cpuc->pcr = pcr_ops->read();
793}
794
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200795void perf_event_grab_pmc(void)
David S. Miller59abbd12009-09-10 06:28:20 -0700796{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200797 if (atomic_inc_not_zero(&active_events))
David S. Miller59abbd12009-09-10 06:28:20 -0700798 return;
799
800 mutex_lock(&pmc_grab_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200801 if (atomic_read(&active_events) == 0) {
David S. Miller59abbd12009-09-10 06:28:20 -0700802 if (atomic_read(&nmi_active) > 0) {
David S. Millerd1751382009-09-29 21:27:06 -0700803 on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
David S. Miller59abbd12009-09-10 06:28:20 -0700804 BUG_ON(atomic_read(&nmi_active) != 0);
805 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200806 atomic_inc(&active_events);
David S. Miller59abbd12009-09-10 06:28:20 -0700807 }
808 mutex_unlock(&pmc_grab_mutex);
809}
810
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200811void perf_event_release_pmc(void)
David S. Miller59abbd12009-09-10 06:28:20 -0700812{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200813 if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
David S. Miller59abbd12009-09-10 06:28:20 -0700814 if (atomic_read(&nmi_active) == 0)
815 on_each_cpu(start_nmi_watchdog, NULL, 1);
816 mutex_unlock(&pmc_grab_mutex);
817 }
818}
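/* Background (descriptive): the NMI watchdog uses the same %pcr/%pic
 * hardware, so the first active perf event stops the watchdog on every
 * cpu (snapshotting %pcr along the way) and releasing the last event
 * restarts it.
 */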
819
David S. Miller2ce4da22009-09-26 20:42:10 -0700820static const struct perf_event_map *sparc_map_cache_event(u64 config)
821{
822 unsigned int cache_type, cache_op, cache_result;
823 const struct perf_event_map *pmap;
824
825 if (!sparc_pmu->cache_map)
826 return ERR_PTR(-ENOENT);
827
828 cache_type = (config >> 0) & 0xff;
829 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
830 return ERR_PTR(-EINVAL);
831
832 cache_op = (config >> 8) & 0xff;
833 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
834 return ERR_PTR(-EINVAL);
835
836 cache_result = (config >> 16) & 0xff;
837 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
838 return ERR_PTR(-EINVAL);
839
840 pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
841
842 if (pmap->encoding == CACHE_OP_UNSUPPORTED)
843 return ERR_PTR(-ENOENT);
844
845 if (pmap->encoding == CACHE_OP_NONSENSE)
846 return ERR_PTR(-EINVAL);
847
848 return pmap;
849}
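/* Worked example (illustrative): a PERF_TYPE_HW_CACHE config of
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) is decoded above
 * into cache_type/cache_op/cache_result and, on ultra3, resolves to the
 * { 0x09, PIC_UPPER } entry of ultra3_cache_map.
 */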
850
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200851static void hw_perf_event_destroy(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -0700852{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200853 perf_event_release_pmc();
David S. Miller59abbd12009-09-10 06:28:20 -0700854}
855
David S. Millera72a8a52009-09-28 17:35:20 -0700856/* Make sure all events can be scheduled into the hardware at
857 * the same time. This is simplified by the fact that we only
858 * need to support 2 simultaneous HW events.
David S. Millere7bef6b2010-01-20 02:59:47 -0800859 *
860 * As a side effect, the evts[]->hw.idx values will be assigned
861 * on success. These are pending indexes. When the events are
862 * actually programmed into the chip, these values will propagate
863 * to the per-cpu cpuc->current_idx[] slots, see the code in
864 * maybe_change_configuration() for details.
David S. Millera72a8a52009-09-28 17:35:20 -0700865 */
David S. Millere7bef6b2010-01-20 02:59:47 -0800866static int sparc_check_constraints(struct perf_event **evts,
867 unsigned long *events, int n_ev)
David S. Millera72a8a52009-09-28 17:35:20 -0700868{
David S. Millere7bef6b2010-01-20 02:59:47 -0800869 u8 msk0 = 0, msk1 = 0;
870 int idx0 = 0;
David S. Millera72a8a52009-09-28 17:35:20 -0700871
David S. Millere7bef6b2010-01-20 02:59:47 -0800872 /* This case is possible when we are invoked from
873 * hw_perf_group_sched_in().
874 */
875 if (!n_ev)
876 return 0;
David S. Millera72a8a52009-09-28 17:35:20 -0700877
David S. Millere7bef6b2010-01-20 02:59:47 -0800878 if (n_ev > perf_max_events)
879 return -1;
David S. Millera72a8a52009-09-28 17:35:20 -0700880
David S. Millere7bef6b2010-01-20 02:59:47 -0800881 msk0 = perf_event_get_msk(events[0]);
882 if (n_ev == 1) {
883 if (msk0 & PIC_LOWER)
884 idx0 = 1;
885 goto success;
886 }
887 BUG_ON(n_ev != 2);
888 msk1 = perf_event_get_msk(events[1]);
David S. Millera72a8a52009-09-28 17:35:20 -0700889
David S. Millere7bef6b2010-01-20 02:59:47 -0800890 /* If both events can go on any counter, OK. */
891 if (msk0 == (PIC_UPPER | PIC_LOWER) &&
892 msk1 == (PIC_UPPER | PIC_LOWER))
893 goto success;
David S. Millera72a8a52009-09-28 17:35:20 -0700894
David S. Millere7bef6b2010-01-20 02:59:47 -0800895 /* If one event is limited to a specific counter,
896 * and the other can go on both, OK.
897 */
898 if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
899 msk1 == (PIC_UPPER | PIC_LOWER)) {
900 if (msk0 & PIC_LOWER)
901 idx0 = 1;
902 goto success;
David S. Millera72a8a52009-09-28 17:35:20 -0700903 }
904
David S. Millere7bef6b2010-01-20 02:59:47 -0800905 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
906 msk0 == (PIC_UPPER | PIC_LOWER)) {
907 if (msk1 & PIC_UPPER)
908 idx0 = 1;
909 goto success;
910 }
911
912 /* If the events are fixed to different counters, OK. */
913 if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
914 (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
915 if (msk0 & PIC_LOWER)
916 idx0 = 1;
917 goto success;
918 }
919
920 /* Otherwise, there is a conflict. */
David S. Millera72a8a52009-09-28 17:35:20 -0700921 return -1;
David S. Millere7bef6b2010-01-20 02:59:47 -0800922
923success:
924 evts[0]->hw.idx = idx0;
925 if (n_ev == 2)
926 evts[1]->hw.idx = idx0 ^ 1;
927 return 0;
David S. Millera72a8a52009-09-28 17:35:20 -0700928}
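/* Example of the resolution above (a sketch): scheduling one event whose
 * mask is PIC_UPPER only together with one that allows PIC_UPPER|PIC_LOWER
 * pins the restricted event to the upper counter (idx 0) and pushes the
 * flexible one to the lower counter (idx 1); two events both fixed to the
 * same counter fall through to the conflict case and return -1.
 */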
929
David S. Miller01552f72009-09-27 20:43:07 -0700930static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
931{
932 int eu = 0, ek = 0, eh = 0;
933 struct perf_event *event;
934 int i, n, first;
935
936 n = n_prev + n_new;
937 if (n <= 1)
938 return 0;
939
940 first = 1;
941 for (i = 0; i < n; i++) {
942 event = evts[i];
943 if (first) {
944 eu = event->attr.exclude_user;
945 ek = event->attr.exclude_kernel;
946 eh = event->attr.exclude_hv;
947 first = 0;
948 } else if (event->attr.exclude_user != eu ||
949 event->attr.exclude_kernel != ek ||
950 event->attr.exclude_hv != eh) {
951 return -EAGAIN;
952 }
953 }
954
955 return 0;
956}
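/* Rationale (descriptive): because one %pcr supplies the user/kernel/hv
 * enable bits for both counters, every event scheduled at the same time
 * has to agree on its exclude_* settings; a mismatch is reported back as
 * -EAGAIN rather than silently mis-counting.
 */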
957
958static int collect_events(struct perf_event *group, int max_count,
David S. Millere7bef6b2010-01-20 02:59:47 -0800959 struct perf_event *evts[], unsigned long *events,
960 int *current_idx)
David S. Miller01552f72009-09-27 20:43:07 -0700961{
962 struct perf_event *event;
963 int n = 0;
964
965 if (!is_software_event(group)) {
966 if (n >= max_count)
967 return -1;
968 evts[n] = group;
David S. Millere7bef6b2010-01-20 02:59:47 -0800969 events[n] = group->hw.event_base;
970 current_idx[n++] = PIC_NO_INDEX;
David S. Miller01552f72009-09-27 20:43:07 -0700971 }
972 list_for_each_entry(event, &group->sibling_list, group_entry) {
973 if (!is_software_event(event) &&
974 event->state != PERF_EVENT_STATE_OFF) {
975 if (n >= max_count)
976 return -1;
977 evts[n] = event;
David S. Millere7bef6b2010-01-20 02:59:47 -0800978 events[n] = event->hw.event_base;
979 current_idx[n++] = PIC_NO_INDEX;
David S. Miller01552f72009-09-27 20:43:07 -0700980 }
981 }
982 return n;
983}
984
David S. Millere7bef6b2010-01-20 02:59:47 -0800985static int sparc_pmu_enable(struct perf_event *event)
986{
987 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
988 int n0, ret = -EAGAIN;
989 unsigned long flags;
990
991 local_irq_save(flags);
992 perf_disable();
993
994 n0 = cpuc->n_events;
995 if (n0 >= perf_max_events)
996 goto out;
997
998 cpuc->event[n0] = event;
999 cpuc->events[n0] = event->hw.event_base;
1000 cpuc->current_idx[n0] = PIC_NO_INDEX;
1001
Lin Minga13c3af2010-04-23 13:56:33 +08001002 /*
 1003	 * If a group event scheduling transaction was started,
 1004	 * skip the schedulability test here; it will be performed
 1005	 * at commit time (->commit_txn) as a whole.
1006 */
1007 if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
1008 goto nocheck;
1009
David S. Millere7bef6b2010-01-20 02:59:47 -08001010 if (check_excludes(cpuc->event, n0, 1))
1011 goto out;
1012 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1013 goto out;
1014
Lin Minga13c3af2010-04-23 13:56:33 +08001015nocheck:
David S. Millere7bef6b2010-01-20 02:59:47 -08001016 cpuc->n_events++;
1017 cpuc->n_added++;
1018
1019 ret = 0;
1020out:
1021 perf_enable();
1022 local_irq_restore(flags);
1023 return ret;
1024}
1025
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001026static int __hw_perf_event_init(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -07001027{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001028 struct perf_event_attr *attr = &event->attr;
David S. Miller01552f72009-09-27 20:43:07 -07001029 struct perf_event *evts[MAX_HWEVENTS];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001030 struct hw_perf_event *hwc = &event->hw;
David S. Millera72a8a52009-09-28 17:35:20 -07001031 unsigned long events[MAX_HWEVENTS];
David S. Millere7bef6b2010-01-20 02:59:47 -08001032 int current_idx_dmy[MAX_HWEVENTS];
David S. Miller59abbd12009-09-10 06:28:20 -07001033 const struct perf_event_map *pmap;
David S. Miller01552f72009-09-27 20:43:07 -07001034 int n;
David S. Miller59abbd12009-09-10 06:28:20 -07001035
1036 if (atomic_read(&nmi_active) < 0)
1037 return -ENODEV;
1038
David S. Miller2ce4da22009-09-26 20:42:10 -07001039 if (attr->type == PERF_TYPE_HARDWARE) {
1040 if (attr->config >= sparc_pmu->max_events)
1041 return -EINVAL;
1042 pmap = sparc_pmu->event_map(attr->config);
1043 } else if (attr->type == PERF_TYPE_HW_CACHE) {
1044 pmap = sparc_map_cache_event(attr->config);
1045 if (IS_ERR(pmap))
1046 return PTR_ERR(pmap);
1047 } else
David S. Miller59abbd12009-09-10 06:28:20 -07001048 return -EOPNOTSUPP;
1049
David S. Millere7bef6b2010-01-20 02:59:47 -08001050 /* We save the enable bits in the config_base. */
David S. Miller496c07e2009-09-10 07:10:59 -07001051 hwc->config_base = sparc_pmu->irq_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001052 if (!attr->exclude_user)
1053 hwc->config_base |= PCR_UTRACE;
1054 if (!attr->exclude_kernel)
1055 hwc->config_base |= PCR_STRACE;
David S. Miller91b92862009-09-10 07:09:06 -07001056 if (!attr->exclude_hv)
1057 hwc->config_base |= sparc_pmu->hv_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001058
David S. Millera72a8a52009-09-28 17:35:20 -07001059 hwc->event_base = perf_event_encode(pmap);
1060
David S. Miller01552f72009-09-27 20:43:07 -07001061 n = 0;
1062 if (event->group_leader != event) {
1063 n = collect_events(event->group_leader,
1064 perf_max_events - 1,
David S. Millere7bef6b2010-01-20 02:59:47 -08001065 evts, events, current_idx_dmy);
David S. Miller01552f72009-09-27 20:43:07 -07001066 if (n < 0)
1067 return -EINVAL;
1068 }
David S. Millera72a8a52009-09-28 17:35:20 -07001069 events[n] = hwc->event_base;
David S. Miller01552f72009-09-27 20:43:07 -07001070 evts[n] = event;
1071
1072 if (check_excludes(evts, n, 1))
1073 return -EINVAL;
1074
David S. Millere7bef6b2010-01-20 02:59:47 -08001075 if (sparc_check_constraints(evts, events, n + 1))
David S. Millera72a8a52009-09-28 17:35:20 -07001076 return -EINVAL;
1077
David S. Millere7bef6b2010-01-20 02:59:47 -08001078 hwc->idx = PIC_NO_INDEX;
1079
David S. Miller01552f72009-09-27 20:43:07 -07001080 /* Try to do all error checking before this point, as unwinding
1081 * state after grabbing the PMC is difficult.
1082 */
1083 perf_event_grab_pmc();
1084 event->destroy = hw_perf_event_destroy;
1085
David S. Miller59abbd12009-09-10 06:28:20 -07001086 if (!hwc->sample_period) {
1087 hwc->sample_period = MAX_PERIOD;
1088 hwc->last_period = hwc->sample_period;
1089 atomic64_set(&hwc->period_left, hwc->sample_period);
1090 }
1091
David S. Miller59abbd12009-09-10 06:28:20 -07001092 return 0;
1093}
1094
Lin Minga13c3af2010-04-23 13:56:33 +08001095/*
1096 * Start group events scheduling transaction
1097 * Set the flag to make pmu::enable() not perform the
 1098 * schedulability test; it will be performed at commit time.
1099 */
1100static void sparc_pmu_start_txn(const struct pmu *pmu)
1101{
1102 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1103
1104 cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
1105}
1106
1107/*
1108 * Stop group events scheduling transaction
1109 * Clear the flag and pmu::enable() will perform the
1110 * schedulability test.
1111 */
1112static void sparc_pmu_cancel_txn(const struct pmu *pmu)
1113{
1114 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1115
1116 cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
1117}
1118
1119/*
1120 * Commit group events scheduling transaction
1121 * Perform the group schedulability test as a whole
1122 * Return 0 if success
1123 */
1124static int sparc_pmu_commit_txn(const struct pmu *pmu)
1125{
1126 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1127 int n;
1128
1129 if (!sparc_pmu)
1130 return -EINVAL;
1131
1132 cpuc = &__get_cpu_var(cpu_hw_events);
1133 n = cpuc->n_events;
1134 if (check_excludes(cpuc->event, 0, n))
1135 return -EINVAL;
1136 if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1137 return -EAGAIN;
1138
1139 return 0;
1140}
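/* A sketch of how the generic layer is expected to drive this transaction
 * API when scheduling a whole group (illustrative, not a definition of the
 * core perf code):
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group
 *		if (pmu->enable(event))		// per-event test skipped
 *			goto rollback;
 *	if (pmu->commit_txn(pmu) == 0)		// one combined test
 *		return 0;
 * rollback:
 *	pmu->cancel_txn(pmu);
 */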
1141
David S. Miller59abbd12009-09-10 06:28:20 -07001142static const struct pmu pmu = {
1143 .enable = sparc_pmu_enable,
1144 .disable = sparc_pmu_disable,
1145 .read = sparc_pmu_read,
1146 .unthrottle = sparc_pmu_unthrottle,
Lin Minga13c3af2010-04-23 13:56:33 +08001147 .start_txn = sparc_pmu_start_txn,
1148 .cancel_txn = sparc_pmu_cancel_txn,
1149 .commit_txn = sparc_pmu_commit_txn,
David S. Miller59abbd12009-09-10 06:28:20 -07001150};
1151
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001152const struct pmu *hw_perf_event_init(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -07001153{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001154 int err = __hw_perf_event_init(event);
David S. Miller59abbd12009-09-10 06:28:20 -07001155
1156 if (err)
1157 return ERR_PTR(err);
1158 return &pmu;
1159}
1160
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001161void perf_event_print_debug(void)
David S. Miller59abbd12009-09-10 06:28:20 -07001162{
1163 unsigned long flags;
1164 u64 pcr, pic;
1165 int cpu;
1166
1167 if (!sparc_pmu)
1168 return;
1169
1170 local_irq_save(flags);
1171
1172 cpu = smp_processor_id();
1173
1174 pcr = pcr_ops->read();
1175 read_pic(pic);
1176
1177 pr_info("\n");
1178 pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
1179 cpu, pcr, pic);
1180
1181 local_irq_restore(flags);
1182}
1183
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001184static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
David S. Millerd29862f2009-09-28 17:37:12 -07001185 unsigned long cmd, void *__args)
David S. Miller59abbd12009-09-10 06:28:20 -07001186{
1187 struct die_args *args = __args;
1188 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001189 struct cpu_hw_events *cpuc;
David S. Miller59abbd12009-09-10 06:28:20 -07001190 struct pt_regs *regs;
David S. Millere7bef6b2010-01-20 02:59:47 -08001191 int i;
David S. Miller59abbd12009-09-10 06:28:20 -07001192
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001193 if (!atomic_read(&active_events))
David S. Miller59abbd12009-09-10 06:28:20 -07001194 return NOTIFY_DONE;
1195
1196 switch (cmd) {
1197 case DIE_NMI:
1198 break;
1199
1200 default:
1201 return NOTIFY_DONE;
1202 }
1203
1204 regs = args->regs;
1205
Peter Zijlstradc1d6282010-03-03 15:55:04 +01001206 perf_sample_data_init(&data, 0);
David S. Miller59abbd12009-09-10 06:28:20 -07001207
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001208 cpuc = &__get_cpu_var(cpu_hw_events);
David S. Millere04ed382010-01-04 23:16:03 -08001209
1210 /* If the PMU has the TOE IRQ enable bits, we need to do a
1211 * dummy write to the %pcr to clear the overflow bits and thus
1212 * the interrupt.
1213 *
1214 * Do this before we peek at the counters to determine
1215 * overflow so we don't lose any events.
1216 */
1217 if (sparc_pmu->irq_bit)
1218 pcr_ops->write(cpuc->pcr);
1219
David S. Millere7bef6b2010-01-20 02:59:47 -08001220 for (i = 0; i < cpuc->n_events; i++) {
1221 struct perf_event *event = cpuc->event[i];
1222 int idx = cpuc->current_idx[i];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001223 struct hw_perf_event *hwc;
David S. Miller59abbd12009-09-10 06:28:20 -07001224 u64 val;
1225
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001226 hwc = &event->hw;
1227 val = sparc_perf_event_update(event, hwc, idx);
David S. Miller59abbd12009-09-10 06:28:20 -07001228 if (val & (1ULL << 31))
1229 continue;
1230
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001231 data.period = event->hw.last_period;
1232 if (!sparc_perf_event_set_period(event, hwc, idx))
David S. Miller59abbd12009-09-10 06:28:20 -07001233 continue;
1234
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001235 if (perf_event_overflow(event, 1, &data, regs))
David S. Millerd1751382009-09-29 21:27:06 -07001236 sparc_pmu_disable_event(cpuc, hwc, idx);
David S. Miller59abbd12009-09-10 06:28:20 -07001237 }
1238
1239 return NOTIFY_STOP;
1240}
1241
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001242static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1243 .notifier_call = perf_event_nmi_handler,
David S. Miller59abbd12009-09-10 06:28:20 -07001244};
1245
1246static bool __init supported_pmu(void)
1247{
David S. Miller28e8f9b2009-09-26 20:54:22 -07001248 if (!strcmp(sparc_pmu_type, "ultra3") ||
1249 !strcmp(sparc_pmu_type, "ultra3+") ||
1250 !strcmp(sparc_pmu_type, "ultra3i") ||
1251 !strcmp(sparc_pmu_type, "ultra4+")) {
1252 sparc_pmu = &ultra3_pmu;
David S. Miller59abbd12009-09-10 06:28:20 -07001253 return true;
1254 }
David S. Miller7eebda62009-09-26 21:23:41 -07001255 if (!strcmp(sparc_pmu_type, "niagara")) {
1256 sparc_pmu = &niagara1_pmu;
1257 return true;
1258 }
David S. Millerb73d8842009-09-10 07:22:18 -07001259 if (!strcmp(sparc_pmu_type, "niagara2")) {
1260 sparc_pmu = &niagara2_pmu;
1261 return true;
1262 }
David S. Miller59abbd12009-09-10 06:28:20 -07001263 return false;
1264}
1265
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001266void __init init_hw_perf_events(void)
David S. Miller59abbd12009-09-10 06:28:20 -07001267{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001268 pr_info("Performance events: ");
David S. Miller59abbd12009-09-10 06:28:20 -07001269
1270 if (!supported_pmu()) {
1271 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
1272 return;
1273 }
1274
1275 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1276
David S. Millere7bef6b2010-01-20 02:59:47 -08001277 /* All sparc64 PMUs currently have 2 events. */
1278 perf_max_events = 2;
David S. Miller59abbd12009-09-10 06:28:20 -07001279
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001280 register_die_notifier(&perf_event_nmi_notifier);
David S. Miller59abbd12009-09-10 06:28:20 -07001281}
David S. Miller4f6dbe42010-01-19 00:26:13 -08001282
1283static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1284{
1285 if (entry->nr < PERF_MAX_STACK_DEPTH)
1286 entry->ip[entry->nr++] = ip;
1287}
1288
1289static void perf_callchain_kernel(struct pt_regs *regs,
1290 struct perf_callchain_entry *entry)
1291{
1292 unsigned long ksp, fp;
1293
1294 callchain_store(entry, PERF_CONTEXT_KERNEL);
1295 callchain_store(entry, regs->tpc);
1296
1297 ksp = regs->u_regs[UREG_I6];
1298 fp = ksp + STACK_BIAS;
1299 do {
1300 struct sparc_stackf *sf;
1301 struct pt_regs *regs;
1302 unsigned long pc;
1303
1304 if (!kstack_valid(current_thread_info(), fp))
1305 break;
1306
1307 sf = (struct sparc_stackf *) fp;
1308 regs = (struct pt_regs *) (sf + 1);
1309
1310 if (kstack_is_trap_frame(current_thread_info(), regs)) {
1311 if (user_mode(regs))
1312 break;
1313 pc = regs->tpc;
1314 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1315 } else {
1316 pc = sf->callers_pc;
1317 fp = (unsigned long)sf->fp + STACK_BIAS;
1318 }
1319 callchain_store(entry, pc);
1320 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1321}
1322
1323static void perf_callchain_user_64(struct pt_regs *regs,
1324 struct perf_callchain_entry *entry)
1325{
1326 unsigned long ufp;
1327
1328 callchain_store(entry, PERF_CONTEXT_USER);
1329 callchain_store(entry, regs->tpc);
1330
1331 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1332 do {
1333 struct sparc_stackf *usf, sf;
1334 unsigned long pc;
1335
1336 usf = (struct sparc_stackf *) ufp;
1337 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1338 break;
1339
1340 pc = sf.callers_pc;
1341 ufp = (unsigned long)sf.fp + STACK_BIAS;
1342 callchain_store(entry, pc);
1343 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1344}
1345
1346static void perf_callchain_user_32(struct pt_regs *regs,
1347 struct perf_callchain_entry *entry)
1348{
1349 unsigned long ufp;
1350
1351 callchain_store(entry, PERF_CONTEXT_USER);
1352 callchain_store(entry, regs->tpc);
1353
David S. Miller9e8307e2010-03-29 13:08:52 -07001354 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
David S. Miller4f6dbe42010-01-19 00:26:13 -08001355 do {
1356 struct sparc_stackf32 *usf, sf;
1357 unsigned long pc;
1358
1359 usf = (struct sparc_stackf32 *) ufp;
1360 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1361 break;
1362
1363 pc = sf.callers_pc;
1364 ufp = (unsigned long)sf.fp;
1365 callchain_store(entry, pc);
1366 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1367}
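/* Note (descriptive): 32-bit tasks keep plain 32-bit frame pointers with
 * no STACK_BIAS, which is why the initial %i6 value is masked with
 * 0xffffffffUL above and no bias is added when following sf.fp.
 */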
1368
 1369/* Like powerpc, we can't get PMU interrupts within the PMU handler,
Daniel Mack3ad2f3f2010-02-03 08:01:28 +08001370 * so no need for separate NMI and IRQ chains as on x86.
David S. Miller4f6dbe42010-01-19 00:26:13 -08001371 */
1372static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
1373
1374struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1375{
1376 struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
1377
1378 entry->nr = 0;
1379 if (!user_mode(regs)) {
1380 stack_trace_flush();
1381 perf_callchain_kernel(regs, entry);
1382 if (current->mm)
1383 regs = task_pt_regs(current);
1384 else
1385 regs = NULL;
1386 }
1387 if (regs) {
1388 flushw_user();
1389 if (test_thread_flag(TIF_32BIT))
1390 perf_callchain_user_32(regs, entry);
1391 else
1392 perf_callchain_user_64(regs, entry);
1393 }
1394 return entry;
1395}