Ingo Molnarcdd6c482009-09-21 12:02:48 +02001/* Performance event support for sparc64.
David S. Miller59abbd12009-09-10 06:28:20 -07002 *
David S. Miller4f6dbe42010-01-19 00:26:13 -08003 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
David S. Miller59abbd12009-09-10 06:28:20 -07004 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005 * This code is based almost entirely upon the x86 perf event
David S. Miller59abbd12009-09-10 06:28:20 -07006 * code, which is:
7 *
8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2009 Jaswinder Singh Rajput
11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
13 */
14
Ingo Molnarcdd6c482009-09-21 12:02:48 +020015#include <linux/perf_event.h>
David S. Miller59abbd12009-09-10 06:28:20 -070016#include <linux/kprobes.h>
David S. Miller667f0ce2010-04-21 03:08:11 -070017#include <linux/ftrace.h>
David S. Miller59abbd12009-09-10 06:28:20 -070018#include <linux/kernel.h>
19#include <linux/kdebug.h>
20#include <linux/mutex.h>
21
David S. Miller4f6dbe42010-01-19 00:26:13 -080022#include <asm/stacktrace.h>
David S. Miller59abbd12009-09-10 06:28:20 -070023#include <asm/cpudata.h>
David S. Miller4f6dbe42010-01-19 00:26:13 -080024#include <asm/uaccess.h>
David S. Miller59abbd12009-09-10 06:28:20 -070025#include <asm/atomic.h>
26#include <asm/nmi.h>
27#include <asm/pcr.h>
28
Sam Ravnborgcb1b8202011-04-21 15:45:45 -070029#include "kernel.h"
David S. Miller4f6dbe42010-01-19 00:26:13 -080030#include "kstack.h"
31
David S. Miller59abbd12009-09-10 06:28:20 -070032/* Sparc64 chips have two performance counters, 32-bits each, with
33 * overflow interrupts generated on transition from 0xffffffff to 0.
34 * The counters are accessed in one go using a 64-bit register.
35 *
36 * Both counters are controlled using a single control register. The
37 * only way to stop all sampling is to clear all of the context (user,
38 * supervisor, hypervisor) sampling enable bits. But these bits apply
39 * to both counters, thus the two counters can't be enabled/disabled
40 * individually.
41 *
42 * The control register has two event fields, one for each of the two
43 * counters. It's thus nearly impossible to have one counter going
44 * while keeping the other one stopped. Therefore it is possible to
45 * get overflow interrupts for counters not currently "in use" and
46 * that condition must be checked in the overflow interrupt handler.
47 *
48 * So we use a hack, in that we program inactive counters with the
49 * "sw_count0" and "sw_count1" events. These count how many times
50 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
51 * unusual way to encode a NOP and therefore will not trigger in
52 * normal code.
53 */
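/* Illustrative sketch, not driver logic: both 32-bit counters live in
 * the single 64-bit %pic register, so extracting one of them is just
 * a shift and a mask:
 *
 *	u64 pic;
 *	read_pic(pic);
 *	upper = pic >> 32;		(counter at PIC_UPPER_INDEX)
 *	lower = pic & 0xffffffff;	(counter at PIC_LOWER_INDEX)
 *
 * read_pmc() and write_pmc() below implement exactly this split.
 */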
54
Ingo Molnarcdd6c482009-09-21 12:02:48 +020055#define MAX_HWEVENTS 2
David S. Miller59abbd12009-09-10 06:28:20 -070056#define MAX_PERIOD ((1UL << 32) - 1)
57
58#define PIC_UPPER_INDEX 0
59#define PIC_LOWER_INDEX 1
David S. Millere7bef6b2010-01-20 02:59:47 -080060#define PIC_NO_INDEX -1
David S. Miller59abbd12009-09-10 06:28:20 -070061
Ingo Molnarcdd6c482009-09-21 12:02:48 +020062struct cpu_hw_events {
David S. Millere7bef6b2010-01-20 02:59:47 -080063 /* Number of events currently scheduled onto this cpu.
64 * This tells how many entries in the arrays below
65 * are valid.
66 */
67 int n_events;
68
69 /* Number of new events added since the last hw_perf_disable().
70 * This works because the perf event layer always adds new
71 * events inside of a perf_{disable,enable}() sequence.
72 */
73 int n_added;
74
 75 /* Array of events currently scheduled on this cpu. */
76 struct perf_event *event[MAX_HWEVENTS];
77
78 /* Array of encoded longs, specifying the %pcr register
 79 * encoding and the mask of PIC counters this event can
80 * be scheduled on. See perf_event_encode() et al.
81 */
82 unsigned long events[MAX_HWEVENTS];
83
84 /* The current counter index assigned to an event. When the
85 * event hasn't been programmed into the cpu yet, this will
86 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
87 * we ought to schedule the event.
88 */
89 int current_idx[MAX_HWEVENTS];
90
91 /* Software copy of %pcr register on this cpu. */
David S. Millerd1751382009-09-29 21:27:06 -070092 u64 pcr;
David S. Millere7bef6b2010-01-20 02:59:47 -080093
 94 /* Enabled/disabled state. */
David S. Millerd1751382009-09-29 21:27:06 -070095 int enabled;
Lin Minga13c3af2010-04-23 13:56:33 +080096
97 unsigned int group_flag;
David S. Miller59abbd12009-09-10 06:28:20 -070098};
Ingo Molnarcdd6c482009-09-21 12:02:48 +020099DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
David S. Miller59abbd12009-09-10 06:28:20 -0700100
David S. Millere7bef6b2010-01-20 02:59:47 -0800101/* An event map describes the characteristics of a performance
102 * counter event. In particular it gives the encoding as well as
103 * a mask telling which counters the event can be measured on.
104 */
David S. Miller59abbd12009-09-10 06:28:20 -0700105struct perf_event_map {
106 u16 encoding;
107 u8 pic_mask;
108#define PIC_NONE 0x00
109#define PIC_UPPER 0x01
110#define PIC_LOWER 0x02
111};
112
David S. Millere7bef6b2010-01-20 02:59:47 -0800113/* Encode a perf_event_map entry into a long. */
David S. Millera72a8a52009-09-28 17:35:20 -0700114static unsigned long perf_event_encode(const struct perf_event_map *pmap)
115{
116 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
117}
118
David S. Millere7bef6b2010-01-20 02:59:47 -0800119static u8 perf_event_get_msk(unsigned long val)
David S. Millera72a8a52009-09-28 17:35:20 -0700120{
David S. Millere7bef6b2010-01-20 02:59:47 -0800121 return val & 0xff;
122}
123
124static u64 perf_event_get_enc(unsigned long val)
125{
126 return val >> 16;
David S. Millera72a8a52009-09-28 17:35:20 -0700127}
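/* Worked example of the encoding above: the ultra3 map entry below for
 * PERF_COUNT_HW_CACHE_MISSES is { 0x0009, PIC_UPPER }, so
 * perf_event_encode() produces (0x0009 << 16) | 0x01 == 0x00090001,
 * from which perf_event_get_msk() recovers 0x01 and
 * perf_event_get_enc() recovers 0x9.
 */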
128
David S. Miller2ce4da22009-09-26 20:42:10 -0700129#define C(x) PERF_COUNT_HW_CACHE_##x
130
131#define CACHE_OP_UNSUPPORTED 0xfffe
132#define CACHE_OP_NONSENSE 0xffff
133
134typedef struct perf_event_map cache_map_t
135 [PERF_COUNT_HW_CACHE_MAX]
136 [PERF_COUNT_HW_CACHE_OP_MAX]
137 [PERF_COUNT_HW_CACHE_RESULT_MAX];
138
David S. Miller59abbd12009-09-10 06:28:20 -0700139struct sparc_pmu {
140 const struct perf_event_map *(*event_map)(int);
David S. Miller2ce4da22009-09-26 20:42:10 -0700141 const cache_map_t *cache_map;
David S. Miller59abbd12009-09-10 06:28:20 -0700142 int max_events;
143 int upper_shift;
144 int lower_shift;
145 int event_mask;
David S. Miller91b92862009-09-10 07:09:06 -0700146 int hv_bit;
David S. Miller496c07e2009-09-10 07:10:59 -0700147 int irq_bit;
David S. Miller660d1372009-09-10 07:13:26 -0700148 int upper_nop;
149 int lower_nop;
David S. Miller59abbd12009-09-10 06:28:20 -0700150};
151
David S. Miller28e8f9b2009-09-26 20:54:22 -0700152static const struct perf_event_map ultra3_perfmon_event_map[] = {
David S. Miller59abbd12009-09-10 06:28:20 -0700153 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
154 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
155 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
156 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
157};
158
David S. Miller28e8f9b2009-09-26 20:54:22 -0700159static const struct perf_event_map *ultra3_event_map(int event_id)
David S. Miller59abbd12009-09-10 06:28:20 -0700160{
David S. Miller28e8f9b2009-09-26 20:54:22 -0700161 return &ultra3_perfmon_event_map[event_id];
David S. Miller59abbd12009-09-10 06:28:20 -0700162}
163
David S. Miller28e8f9b2009-09-26 20:54:22 -0700164static const cache_map_t ultra3_cache_map = {
David S. Miller2ce4da22009-09-26 20:42:10 -0700165[C(L1D)] = {
166 [C(OP_READ)] = {
167 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
168 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
169 },
170 [C(OP_WRITE)] = {
171 [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
172 [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
173 },
174 [C(OP_PREFETCH)] = {
175 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
176 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
177 },
178},
179[C(L1I)] = {
180 [C(OP_READ)] = {
181 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
182 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
183 },
184 [ C(OP_WRITE) ] = {
185 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
186 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
187 },
188 [ C(OP_PREFETCH) ] = {
189 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
190 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
191 },
192},
193[C(LL)] = {
194 [C(OP_READ)] = {
195 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
196 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
197 },
198 [C(OP_WRITE)] = {
199 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
200 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
201 },
202 [C(OP_PREFETCH)] = {
203 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
204 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
205 },
206},
207[C(DTLB)] = {
208 [C(OP_READ)] = {
209 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
210 [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
211 },
212 [ C(OP_WRITE) ] = {
213 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
214 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
215 },
216 [ C(OP_PREFETCH) ] = {
217 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
218 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
219 },
220},
221[C(ITLB)] = {
222 [C(OP_READ)] = {
223 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
224 [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
225 },
226 [ C(OP_WRITE) ] = {
227 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
228 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
229 },
230 [ C(OP_PREFETCH) ] = {
231 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
232 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
233 },
234},
235[C(BPU)] = {
236 [C(OP_READ)] = {
237 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
238 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
239 },
240 [ C(OP_WRITE) ] = {
241 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
242 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
243 },
244 [ C(OP_PREFETCH) ] = {
245 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
246 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
247 },
248},
Peter Zijlstra89d6c0b2011-04-22 23:37:06 +0200249[C(NODE)] = {
250 [C(OP_READ)] = {
251 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
252 [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
253 },
254 [ C(OP_WRITE) ] = {
255 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
256 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
257 },
258 [ C(OP_PREFETCH) ] = {
259 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
260 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
261 },
262},
David S. Miller2ce4da22009-09-26 20:42:10 -0700263};
264
David S. Miller28e8f9b2009-09-26 20:54:22 -0700265static const struct sparc_pmu ultra3_pmu = {
266 .event_map = ultra3_event_map,
267 .cache_map = &ultra3_cache_map,
268 .max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
David S. Miller59abbd12009-09-10 06:28:20 -0700269 .upper_shift = 11,
270 .lower_shift = 4,
271 .event_mask = 0x3f,
David S. Miller660d1372009-09-10 07:13:26 -0700272 .upper_nop = 0x1c,
273 .lower_nop = 0x14,
David S. Miller59abbd12009-09-10 06:28:20 -0700274};
275
David S. Miller7eebda62009-09-26 21:23:41 -0700276/* Niagara1 is very limited. The upper PIC is hard-locked to count
 277 * only instructions, so it is free-running, which creates all kinds of
David S. Miller6e804252009-09-29 15:10:23 -0700278 * problems. Some hardware designs make one wonder if the creator
David S. Miller7eebda62009-09-26 21:23:41 -0700279 * even looked at how this stuff gets used by software.
280 */
281static const struct perf_event_map niagara1_perfmon_event_map[] = {
282 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
283 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
284 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
285 [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
286};
287
288static const struct perf_event_map *niagara1_event_map(int event_id)
289{
290 return &niagara1_perfmon_event_map[event_id];
291}
292
293static const cache_map_t niagara1_cache_map = {
294[C(L1D)] = {
295 [C(OP_READ)] = {
296 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
297 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
298 },
299 [C(OP_WRITE)] = {
300 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
301 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
302 },
303 [C(OP_PREFETCH)] = {
304 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
305 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
306 },
307},
308[C(L1I)] = {
309 [C(OP_READ)] = {
310 [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
311 [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
312 },
313 [ C(OP_WRITE) ] = {
314 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
315 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
316 },
317 [ C(OP_PREFETCH) ] = {
318 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
319 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
320 },
321},
322[C(LL)] = {
323 [C(OP_READ)] = {
324 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
325 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
326 },
327 [C(OP_WRITE)] = {
328 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
329 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
330 },
331 [C(OP_PREFETCH)] = {
332 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
333 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
334 },
335},
336[C(DTLB)] = {
337 [C(OP_READ)] = {
338 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
339 [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
340 },
341 [ C(OP_WRITE) ] = {
342 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
343 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
344 },
345 [ C(OP_PREFETCH) ] = {
346 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
347 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
348 },
349},
350[C(ITLB)] = {
351 [C(OP_READ)] = {
352 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
353 [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
354 },
355 [ C(OP_WRITE) ] = {
356 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
357 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
358 },
359 [ C(OP_PREFETCH) ] = {
360 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
361 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
362 },
363},
364[C(BPU)] = {
365 [C(OP_READ)] = {
366 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
367 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
368 },
369 [ C(OP_WRITE) ] = {
370 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
371 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
372 },
373 [ C(OP_PREFETCH) ] = {
374 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
375 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
376 },
377},
Peter Zijlstra89d6c0b2011-04-22 23:37:06 +0200378[C(NODE)] = {
379 [C(OP_READ)] = {
380 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
381 [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
382 },
383 [ C(OP_WRITE) ] = {
384 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
385 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
386 },
387 [ C(OP_PREFETCH) ] = {
388 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
389 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
390 },
391},
David S. Miller7eebda62009-09-26 21:23:41 -0700392};
393
394static const struct sparc_pmu niagara1_pmu = {
395 .event_map = niagara1_event_map,
396 .cache_map = &niagara1_cache_map,
397 .max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
398 .upper_shift = 0,
399 .lower_shift = 4,
400 .event_mask = 0x7,
401 .upper_nop = 0x0,
402 .lower_nop = 0x0,
403};
404
David S. Millerb73d8842009-09-10 07:22:18 -0700405static const struct perf_event_map niagara2_perfmon_event_map[] = {
406 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
407 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
408 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
409 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
410 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
411 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
412};
413
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200414static const struct perf_event_map *niagara2_event_map(int event_id)
David S. Millerb73d8842009-09-10 07:22:18 -0700415{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200416 return &niagara2_perfmon_event_map[event_id];
David S. Millerb73d8842009-09-10 07:22:18 -0700417}
418
David S. Millerd0b86482009-09-26 21:04:16 -0700419static const cache_map_t niagara2_cache_map = {
420[C(L1D)] = {
421 [C(OP_READ)] = {
422 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
423 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
424 },
425 [C(OP_WRITE)] = {
426 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
427 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
428 },
429 [C(OP_PREFETCH)] = {
430 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
431 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
432 },
433},
434[C(L1I)] = {
435 [C(OP_READ)] = {
436 [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
437 [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
438 },
439 [ C(OP_WRITE) ] = {
440 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
441 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
442 },
443 [ C(OP_PREFETCH) ] = {
444 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
445 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
446 },
447},
448[C(LL)] = {
449 [C(OP_READ)] = {
450 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
451 [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
452 },
453 [C(OP_WRITE)] = {
454 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
455 [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
456 },
457 [C(OP_PREFETCH)] = {
458 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
459 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
460 },
461},
462[C(DTLB)] = {
463 [C(OP_READ)] = {
464 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
465 [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
466 },
467 [ C(OP_WRITE) ] = {
468 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
469 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
470 },
471 [ C(OP_PREFETCH) ] = {
472 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
473 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
474 },
475},
476[C(ITLB)] = {
477 [C(OP_READ)] = {
478 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
479 [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
480 },
481 [ C(OP_WRITE) ] = {
482 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
483 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
484 },
485 [ C(OP_PREFETCH) ] = {
486 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
487 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
488 },
489},
490[C(BPU)] = {
491 [C(OP_READ)] = {
492 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
493 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
494 },
495 [ C(OP_WRITE) ] = {
496 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
497 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
498 },
499 [ C(OP_PREFETCH) ] = {
500 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
501 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
502 },
503},
Peter Zijlstra89d6c0b2011-04-22 23:37:06 +0200504[C(NODE)] = {
505 [C(OP_READ)] = {
506 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
507 [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
508 },
509 [ C(OP_WRITE) ] = {
510 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
511 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
512 },
513 [ C(OP_PREFETCH) ] = {
514 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
515 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
516 },
517},
David S. Millerd0b86482009-09-26 21:04:16 -0700518};
519
David S. Millerb73d8842009-09-10 07:22:18 -0700520static const struct sparc_pmu niagara2_pmu = {
521 .event_map = niagara2_event_map,
David S. Millerd0b86482009-09-26 21:04:16 -0700522 .cache_map = &niagara2_cache_map,
David S. Millerb73d8842009-09-10 07:22:18 -0700523 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
524 .upper_shift = 19,
525 .lower_shift = 6,
526 .event_mask = 0xfff,
527 .hv_bit = 0x8,
David S. Millerde23cf32009-10-09 00:42:40 -0700528 .irq_bit = 0x30,
David S. Millerb73d8842009-09-10 07:22:18 -0700529 .upper_nop = 0x220,
530 .lower_nop = 0x220,
531};
532
David S. Miller59abbd12009-09-10 06:28:20 -0700533static const struct sparc_pmu *sparc_pmu __read_mostly;
534
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200535static u64 event_encoding(u64 event_id, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700536{
537 if (idx == PIC_UPPER_INDEX)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200538 event_id <<= sparc_pmu->upper_shift;
David S. Miller59abbd12009-09-10 06:28:20 -0700539 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200540 event_id <<= sparc_pmu->lower_shift;
541 return event_id;
David S. Miller59abbd12009-09-10 06:28:20 -0700542}
543
544static u64 mask_for_index(int idx)
545{
546 return event_encoding(sparc_pmu->event_mask, idx);
547}
548
549static u64 nop_for_index(int idx)
550{
551 return event_encoding(idx == PIC_UPPER_INDEX ?
David S. Miller660d1372009-09-10 07:13:26 -0700552 sparc_pmu->upper_nop :
553 sparc_pmu->lower_nop, idx);
David S. Miller59abbd12009-09-10 06:28:20 -0700554}
555
David S. Millerd1751382009-09-29 21:27:06 -0700556static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700557{
558 u64 val, mask = mask_for_index(idx);
559
David S. Millerd1751382009-09-29 21:27:06 -0700560 val = cpuc->pcr;
561 val &= ~mask;
562 val |= hwc->config;
563 cpuc->pcr = val;
564
565 pcr_ops->write(cpuc->pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700566}
567
David S. Millerd1751382009-09-29 21:27:06 -0700568static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700569{
570 u64 mask = mask_for_index(idx);
571 u64 nop = nop_for_index(idx);
David S. Millerd1751382009-09-29 21:27:06 -0700572 u64 val;
David S. Miller59abbd12009-09-10 06:28:20 -0700573
David S. Millerd1751382009-09-29 21:27:06 -0700574 val = cpuc->pcr;
575 val &= ~mask;
576 val |= nop;
577 cpuc->pcr = val;
578
579 pcr_ops->write(cpuc->pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700580}
581
David S. Miller59abbd12009-09-10 06:28:20 -0700582static u32 read_pmc(int idx)
583{
584 u64 val;
585
586 read_pic(val);
587 if (idx == PIC_UPPER_INDEX)
588 val >>= 32;
589
590 return val & 0xffffffff;
591}
592
593static void write_pmc(int idx, u64 val)
594{
595 u64 shift, mask, pic;
596
597 shift = 0;
598 if (idx == PIC_UPPER_INDEX)
599 shift = 32;
600
601 mask = ((u64) 0xffffffff) << shift;
602 val <<= shift;
603
604 read_pic(pic);
605 pic &= ~mask;
606 pic |= val;
607 write_pic(pic);
608}
609
David S. Millere7bef6b2010-01-20 02:59:47 -0800610static u64 sparc_perf_event_update(struct perf_event *event,
611 struct hw_perf_event *hwc, int idx)
612{
613 int shift = 64 - 32;
614 u64 prev_raw_count, new_raw_count;
615 s64 delta;
616
617again:
Peter Zijlstrae7850592010-05-21 14:43:08 +0200618 prev_raw_count = local64_read(&hwc->prev_count);
David S. Millere7bef6b2010-01-20 02:59:47 -0800619 new_raw_count = read_pmc(idx);
620
Peter Zijlstrae7850592010-05-21 14:43:08 +0200621 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
David S. Millere7bef6b2010-01-20 02:59:47 -0800622 new_raw_count) != prev_raw_count)
623 goto again;
624
625 delta = (new_raw_count << shift) - (prev_raw_count << shift);
626 delta >>= shift;
627
Peter Zijlstrae7850592010-05-21 14:43:08 +0200628 local64_add(delta, &event->count);
629 local64_sub(delta, &hwc->period_left);
David S. Millere7bef6b2010-01-20 02:59:47 -0800630
631 return new_raw_count;
632}
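/* Example of the wraparound handling above: with prev_raw_count ==
 * 0xfffffff0 and new_raw_count == 0x00000010, shifting both values up
 * by 32 bits before subtracting and then arithmetically shifting back
 * down yields delta == 0x20, i.e. the 32 events that occurred across
 * the 32-bit counter wrap are accounted correctly.
 */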
633
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200634static int sparc_perf_event_set_period(struct perf_event *event,
David S. Millerd29862f2009-09-28 17:37:12 -0700635 struct hw_perf_event *hwc, int idx)
David S. Miller59abbd12009-09-10 06:28:20 -0700636{
Peter Zijlstrae7850592010-05-21 14:43:08 +0200637 s64 left = local64_read(&hwc->period_left);
David S. Miller59abbd12009-09-10 06:28:20 -0700638 s64 period = hwc->sample_period;
639 int ret = 0;
640
641 if (unlikely(left <= -period)) {
642 left = period;
Peter Zijlstrae7850592010-05-21 14:43:08 +0200643 local64_set(&hwc->period_left, left);
David S. Miller59abbd12009-09-10 06:28:20 -0700644 hwc->last_period = period;
645 ret = 1;
646 }
647
648 if (unlikely(left <= 0)) {
649 left += period;
Peter Zijlstrae7850592010-05-21 14:43:08 +0200650 local64_set(&hwc->period_left, left);
David S. Miller59abbd12009-09-10 06:28:20 -0700651 hwc->last_period = period;
652 ret = 1;
653 }
654 if (left > MAX_PERIOD)
655 left = MAX_PERIOD;
656
Peter Zijlstrae7850592010-05-21 14:43:08 +0200657 local64_set(&hwc->prev_count, (u64)-left);
David S. Miller59abbd12009-09-10 06:28:20 -0700658
659 write_pmc(idx, (u64)(-left) & 0xffffffff);
660
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200661 perf_event_update_userpage(event);
David S. Miller59abbd12009-09-10 06:28:20 -0700662
663 return ret;
664}
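/* Sketch of the preload math above: for a sample_period of 1000 the
 * counter is written with (u64)(-1000) & 0xffffffff == 0xfffffc18, so
 * it wraps from 0xffffffff to 0 and raises the overflow interrupt
 * after exactly 1000 events.
 */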
665
David S. Millere7bef6b2010-01-20 02:59:47 -0800666/* If performance event entries have been added, move existing
667 * events around (if necessary) and then assign new entries to
668 * counters.
669 */
670static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
David S. Miller59abbd12009-09-10 06:28:20 -0700671{
David S. Millere7bef6b2010-01-20 02:59:47 -0800672 int i;
David S. Miller59abbd12009-09-10 06:28:20 -0700673
David S. Millere7bef6b2010-01-20 02:59:47 -0800674 if (!cpuc->n_added)
675 goto out;
David S. Miller59abbd12009-09-10 06:28:20 -0700676
David S. Millere7bef6b2010-01-20 02:59:47 -0800677 /* Read in the counters which are moving. */
678 for (i = 0; i < cpuc->n_events; i++) {
679 struct perf_event *cp = cpuc->event[i];
David S. Miller59abbd12009-09-10 06:28:20 -0700680
David S. Millere7bef6b2010-01-20 02:59:47 -0800681 if (cpuc->current_idx[i] != PIC_NO_INDEX &&
682 cpuc->current_idx[i] != cp->hw.idx) {
683 sparc_perf_event_update(cp, &cp->hw,
684 cpuc->current_idx[i]);
685 cpuc->current_idx[i] = PIC_NO_INDEX;
686 }
687 }
David S. Miller59abbd12009-09-10 06:28:20 -0700688
David S. Millere7bef6b2010-01-20 02:59:47 -0800689 /* Assign to counters all unassigned events. */
690 for (i = 0; i < cpuc->n_events; i++) {
691 struct perf_event *cp = cpuc->event[i];
692 struct hw_perf_event *hwc = &cp->hw;
693 int idx = hwc->idx;
694 u64 enc;
695
696 if (cpuc->current_idx[i] != PIC_NO_INDEX)
697 continue;
698
699 sparc_perf_event_set_period(cp, hwc, idx);
700 cpuc->current_idx[i] = idx;
701
702 enc = perf_event_get_enc(cpuc->events[i]);
David S. Millerb7d45c32010-06-23 11:39:02 -0700703 pcr &= ~mask_for_index(idx);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +0200704 if (hwc->state & PERF_HES_STOPPED)
705 pcr |= nop_for_index(idx);
706 else
707 pcr |= event_encoding(enc, idx);
David S. Millere7bef6b2010-01-20 02:59:47 -0800708 }
709out:
710 return pcr;
David S. Miller59abbd12009-09-10 06:28:20 -0700711}
712
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +0200713static void sparc_pmu_enable(struct pmu *pmu)
David S. Miller59abbd12009-09-10 06:28:20 -0700714{
David S. Millere7bef6b2010-01-20 02:59:47 -0800715 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
716 u64 pcr;
David S. Miller59abbd12009-09-10 06:28:20 -0700717
David S. Millere7bef6b2010-01-20 02:59:47 -0800718 if (cpuc->enabled)
719 return;
David S. Miller59abbd12009-09-10 06:28:20 -0700720
David S. Millere7bef6b2010-01-20 02:59:47 -0800721 cpuc->enabled = 1;
722 barrier();
David S. Miller59abbd12009-09-10 06:28:20 -0700723
David S. Millere7bef6b2010-01-20 02:59:47 -0800724 pcr = cpuc->pcr;
725 if (!cpuc->n_events) {
726 pcr = 0;
727 } else {
728 pcr = maybe_change_configuration(cpuc, pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700729
David S. Millere7bef6b2010-01-20 02:59:47 -0800730 /* We require that all of the events have the same
731 * configuration, so just fetch the settings from the
732 * first entry.
733 */
734 cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
735 }
David S. Miller59abbd12009-09-10 06:28:20 -0700736
David S. Millere7bef6b2010-01-20 02:59:47 -0800737 pcr_ops->write(cpuc->pcr);
738}
739
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +0200740static void sparc_pmu_disable(struct pmu *pmu)
David S. Millere7bef6b2010-01-20 02:59:47 -0800741{
742 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
743 u64 val;
744
745 if (!cpuc->enabled)
746 return;
747
748 cpuc->enabled = 0;
749 cpuc->n_added = 0;
750
751 val = cpuc->pcr;
752 val &= ~(PCR_UTRACE | PCR_STRACE |
753 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
754 cpuc->pcr = val;
755
756 pcr_ops->write(cpuc->pcr);
David S. Miller59abbd12009-09-10 06:28:20 -0700757}
758
David S. Millere7bef6b2010-01-20 02:59:47 -0800759static int active_event_index(struct cpu_hw_events *cpuc,
760 struct perf_event *event)
761{
762 int i;
763
764 for (i = 0; i < cpuc->n_events; i++) {
765 if (cpuc->event[i] == event)
766 break;
767 }
768 BUG_ON(i == cpuc->n_events);
769 return cpuc->current_idx[i];
David S. Miller59abbd12009-09-10 06:28:20 -0700770}
771
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +0200772static void sparc_pmu_start(struct perf_event *event, int flags)
773{
774 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
775 int idx = active_event_index(cpuc, event);
776
777 if (flags & PERF_EF_RELOAD) {
778 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
779 sparc_perf_event_set_period(event, &event->hw, idx);
780 }
781
782 event->hw.state = 0;
783
784 sparc_pmu_enable_event(cpuc, &event->hw, idx);
785}
786
787static void sparc_pmu_stop(struct perf_event *event, int flags)
788{
789 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
790 int idx = active_event_index(cpuc, event);
791
792 if (!(event->hw.state & PERF_HES_STOPPED)) {
793 sparc_pmu_disable_event(cpuc, &event->hw, idx);
794 event->hw.state |= PERF_HES_STOPPED;
795 }
796
797 if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
798 sparc_perf_event_update(event, &event->hw, idx);
799 event->hw.state |= PERF_HES_UPTODATE;
800 }
801}
802
803static void sparc_pmu_del(struct perf_event *event, int _flags)
804{
805 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
806 unsigned long flags;
807 int i;
808
809 local_irq_save(flags);
810 perf_pmu_disable(event->pmu);
811
812 for (i = 0; i < cpuc->n_events; i++) {
813 if (event == cpuc->event[i]) {
814 /* Absorb the final count and turn off the
815 * event.
816 */
817 sparc_pmu_stop(event, PERF_EF_UPDATE);
818
819 /* Shift remaining entries down into
820 * the existing slot.
821 */
822 while (++i < cpuc->n_events) {
823 cpuc->event[i - 1] = cpuc->event[i];
824 cpuc->events[i - 1] = cpuc->events[i];
825 cpuc->current_idx[i - 1] =
826 cpuc->current_idx[i];
827 }
828
829 perf_event_update_userpage(event);
830
831 cpuc->n_events--;
832 break;
833 }
834 }
835
836 perf_pmu_enable(event->pmu);
837 local_irq_restore(flags);
838}
839
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200840static void sparc_pmu_read(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -0700841{
David S. Millere7bef6b2010-01-20 02:59:47 -0800842 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
843 int idx = active_event_index(cpuc, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200844 struct hw_perf_event *hwc = &event->hw;
David S. Millerd1751382009-09-29 21:27:06 -0700845
David S. Millere7bef6b2010-01-20 02:59:47 -0800846 sparc_perf_event_update(event, hwc, idx);
David S. Miller59abbd12009-09-10 06:28:20 -0700847}
848
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200849static atomic_t active_events = ATOMIC_INIT(0);
David S. Miller59abbd12009-09-10 06:28:20 -0700850static DEFINE_MUTEX(pmc_grab_mutex);
851
David S. Millerd1751382009-09-29 21:27:06 -0700852static void perf_stop_nmi_watchdog(void *unused)
853{
854 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
855
856 stop_nmi_watchdog(NULL);
857 cpuc->pcr = pcr_ops->read();
858}
859
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200860void perf_event_grab_pmc(void)
David S. Miller59abbd12009-09-10 06:28:20 -0700861{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200862 if (atomic_inc_not_zero(&active_events))
David S. Miller59abbd12009-09-10 06:28:20 -0700863 return;
864
865 mutex_lock(&pmc_grab_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200866 if (atomic_read(&active_events) == 0) {
David S. Miller59abbd12009-09-10 06:28:20 -0700867 if (atomic_read(&nmi_active) > 0) {
David S. Millerd1751382009-09-29 21:27:06 -0700868 on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
David S. Miller59abbd12009-09-10 06:28:20 -0700869 BUG_ON(atomic_read(&nmi_active) != 0);
870 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200871 atomic_inc(&active_events);
David S. Miller59abbd12009-09-10 06:28:20 -0700872 }
873 mutex_unlock(&pmc_grab_mutex);
874}
875
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200876void perf_event_release_pmc(void)
David S. Miller59abbd12009-09-10 06:28:20 -0700877{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200878 if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
David S. Miller59abbd12009-09-10 06:28:20 -0700879 if (atomic_read(&nmi_active) == 0)
880 on_each_cpu(start_nmi_watchdog, NULL, 1);
881 mutex_unlock(&pmc_grab_mutex);
882 }
883}
884
David S. Miller2ce4da22009-09-26 20:42:10 -0700885static const struct perf_event_map *sparc_map_cache_event(u64 config)
886{
887 unsigned int cache_type, cache_op, cache_result;
888 const struct perf_event_map *pmap;
889
890 if (!sparc_pmu->cache_map)
891 return ERR_PTR(-ENOENT);
892
893 cache_type = (config >> 0) & 0xff;
894 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
895 return ERR_PTR(-EINVAL);
896
897 cache_op = (config >> 8) & 0xff;
898 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
899 return ERR_PTR(-EINVAL);
900
901 cache_result = (config >> 16) & 0xff;
902 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
903 return ERR_PTR(-EINVAL);
904
905 pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
906
907 if (pmap->encoding == CACHE_OP_UNSUPPORTED)
908 return ERR_PTR(-ENOENT);
909
910 if (pmap->encoding == CACHE_OP_NONSENSE)
911 return ERR_PTR(-EINVAL);
912
913 return pmap;
914}
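/* Worked example of the config decoding above: a request for L1D read
 * misses arrives as config == PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) == 0x010000, which indexes
 * (*cache_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)], i.e. the
 * { 0x09, PIC_UPPER } entry on ultra3.
 */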
915
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200916static void hw_perf_event_destroy(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -0700917{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200918 perf_event_release_pmc();
David S. Miller59abbd12009-09-10 06:28:20 -0700919}
920
David S. Millera72a8a52009-09-28 17:35:20 -0700921/* Make sure all events can be scheduled into the hardware at
922 * the same time. This is simplified by the fact that we only
923 * need to support 2 simultaneous HW events.
David S. Millere7bef6b2010-01-20 02:59:47 -0800924 *
925 * As a side effect, the evts[]->hw.idx values will be assigned
926 * on success. These are pending indexes. When the events are
927 * actually programmed into the chip, these values will propagate
928 * to the per-cpu cpuc->current_idx[] slots, see the code in
929 * maybe_change_configuration() for details.
David S. Millera72a8a52009-09-28 17:35:20 -0700930 */
David S. Millere7bef6b2010-01-20 02:59:47 -0800931static int sparc_check_constraints(struct perf_event **evts,
932 unsigned long *events, int n_ev)
David S. Millera72a8a52009-09-28 17:35:20 -0700933{
David S. Millere7bef6b2010-01-20 02:59:47 -0800934 u8 msk0 = 0, msk1 = 0;
935 int idx0 = 0;
David S. Millera72a8a52009-09-28 17:35:20 -0700936
David S. Millere7bef6b2010-01-20 02:59:47 -0800937 /* This case is possible when we are invoked from
938 * hw_perf_group_sched_in().
939 */
940 if (!n_ev)
941 return 0;
David S. Millera72a8a52009-09-28 17:35:20 -0700942
Peter Zijlstra15ac9a32010-09-06 15:51:45 +0200943 if (n_ev > MAX_HWEVENTS)
David S. Millere7bef6b2010-01-20 02:59:47 -0800944 return -1;
David S. Millera72a8a52009-09-28 17:35:20 -0700945
David S. Millere7bef6b2010-01-20 02:59:47 -0800946 msk0 = perf_event_get_msk(events[0]);
947 if (n_ev == 1) {
948 if (msk0 & PIC_LOWER)
949 idx0 = 1;
950 goto success;
951 }
952 BUG_ON(n_ev != 2);
953 msk1 = perf_event_get_msk(events[1]);
David S. Millera72a8a52009-09-28 17:35:20 -0700954
David S. Millere7bef6b2010-01-20 02:59:47 -0800955 /* If both events can go on any counter, OK. */
956 if (msk0 == (PIC_UPPER | PIC_LOWER) &&
957 msk1 == (PIC_UPPER | PIC_LOWER))
958 goto success;
David S. Millera72a8a52009-09-28 17:35:20 -0700959
David S. Millere7bef6b2010-01-20 02:59:47 -0800960 /* If one event is limited to a specific counter,
961 * and the other can go on both, OK.
962 */
963 if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
964 msk1 == (PIC_UPPER | PIC_LOWER)) {
965 if (msk0 & PIC_LOWER)
966 idx0 = 1;
967 goto success;
David S. Millera72a8a52009-09-28 17:35:20 -0700968 }
969
David S. Millere7bef6b2010-01-20 02:59:47 -0800970 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
971 msk0 == (PIC_UPPER | PIC_LOWER)) {
972 if (msk1 & PIC_UPPER)
973 idx0 = 1;
974 goto success;
975 }
976
977 /* If the events are fixed to different counters, OK. */
978 if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
979 (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
980 if (msk0 & PIC_LOWER)
981 idx0 = 1;
982 goto success;
983 }
984
985 /* Otherwise, there is a conflict. */
David S. Millera72a8a52009-09-28 17:35:20 -0700986 return -1;
David S. Millere7bef6b2010-01-20 02:59:47 -0800987
988success:
989 evts[0]->hw.idx = idx0;
990 if (n_ev == 2)
991 evts[1]->hw.idx = idx0 ^ 1;
992 return 0;
David S. Millera72a8a52009-09-28 17:35:20 -0700993}
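/* Constraint example: with evts[0] restricted to PIC_LOWER only and
 * evts[1] allowed on either counter, the code above picks idx0 == 1,
 * so evts[0]->hw.idx becomes PIC_LOWER_INDEX and evts[1]->hw.idx
 * becomes PIC_UPPER_INDEX.  Two events both hard-wired to the same
 * counter fall through to the conflict return above.
 */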
994
David S. Miller01552f72009-09-27 20:43:07 -0700995static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
996{
997 int eu = 0, ek = 0, eh = 0;
998 struct perf_event *event;
999 int i, n, first;
1000
1001 n = n_prev + n_new;
1002 if (n <= 1)
1003 return 0;
1004
1005 first = 1;
1006 for (i = 0; i < n; i++) {
1007 event = evts[i];
1008 if (first) {
1009 eu = event->attr.exclude_user;
1010 ek = event->attr.exclude_kernel;
1011 eh = event->attr.exclude_hv;
1012 first = 0;
1013 } else if (event->attr.exclude_user != eu ||
1014 event->attr.exclude_kernel != ek ||
1015 event->attr.exclude_hv != eh) {
1016 return -EAGAIN;
1017 }
1018 }
1019
1020 return 0;
1021}
1022
1023static int collect_events(struct perf_event *group, int max_count,
David S. Millere7bef6b2010-01-20 02:59:47 -08001024 struct perf_event *evts[], unsigned long *events,
1025 int *current_idx)
David S. Miller01552f72009-09-27 20:43:07 -07001026{
1027 struct perf_event *event;
1028 int n = 0;
1029
1030 if (!is_software_event(group)) {
1031 if (n >= max_count)
1032 return -1;
1033 evts[n] = group;
David S. Millere7bef6b2010-01-20 02:59:47 -08001034 events[n] = group->hw.event_base;
1035 current_idx[n++] = PIC_NO_INDEX;
David S. Miller01552f72009-09-27 20:43:07 -07001036 }
1037 list_for_each_entry(event, &group->sibling_list, group_entry) {
1038 if (!is_software_event(event) &&
1039 event->state != PERF_EVENT_STATE_OFF) {
1040 if (n >= max_count)
1041 return -1;
1042 evts[n] = event;
David S. Millere7bef6b2010-01-20 02:59:47 -08001043 events[n] = event->hw.event_base;
1044 current_idx[n++] = PIC_NO_INDEX;
David S. Miller01552f72009-09-27 20:43:07 -07001045 }
1046 }
1047 return n;
1048}
1049
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001050static int sparc_pmu_add(struct perf_event *event, int ef_flags)
David S. Millere7bef6b2010-01-20 02:59:47 -08001051{
1052 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1053 int n0, ret = -EAGAIN;
1054 unsigned long flags;
1055
1056 local_irq_save(flags);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001057 perf_pmu_disable(event->pmu);
David S. Millere7bef6b2010-01-20 02:59:47 -08001058
1059 n0 = cpuc->n_events;
Peter Zijlstra15ac9a32010-09-06 15:51:45 +02001060 if (n0 >= MAX_HWEVENTS)
David S. Millere7bef6b2010-01-20 02:59:47 -08001061 goto out;
1062
1063 cpuc->event[n0] = event;
1064 cpuc->events[n0] = event->hw.event_base;
1065 cpuc->current_idx[n0] = PIC_NO_INDEX;
1066
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001067 event->hw.state = PERF_HES_UPTODATE;
1068 if (!(ef_flags & PERF_EF_START))
1069 event->hw.state |= PERF_HES_STOPPED;
1070
Lin Minga13c3af2010-04-23 13:56:33 +08001071 /*
 1072 * If a group event scheduling transaction was started,
Lucas De Marchi25985ed2011-03-30 22:57:33 -031073 * skip the schedulability test here; it will be performed
Lin Minga13c3af2010-04-23 13:56:33 +08001074 * at commit time (->commit_txn) as a whole.
1075 */
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001076 if (cpuc->group_flag & PERF_EVENT_TXN)
Lin Minga13c3af2010-04-23 13:56:33 +08001077 goto nocheck;
1078
David S. Millere7bef6b2010-01-20 02:59:47 -08001079 if (check_excludes(cpuc->event, n0, 1))
1080 goto out;
1081 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1082 goto out;
1083
Lin Minga13c3af2010-04-23 13:56:33 +08001084nocheck:
David S. Millere7bef6b2010-01-20 02:59:47 -08001085 cpuc->n_events++;
1086 cpuc->n_added++;
1087
1088 ret = 0;
1089out:
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001090 perf_pmu_enable(event->pmu);
David S. Millere7bef6b2010-01-20 02:59:47 -08001091 local_irq_restore(flags);
1092 return ret;
1093}
1094
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001095static int sparc_pmu_event_init(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -07001096{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001097 struct perf_event_attr *attr = &event->attr;
David S. Miller01552f72009-09-27 20:43:07 -07001098 struct perf_event *evts[MAX_HWEVENTS];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001099 struct hw_perf_event *hwc = &event->hw;
David S. Millera72a8a52009-09-28 17:35:20 -07001100 unsigned long events[MAX_HWEVENTS];
David S. Millere7bef6b2010-01-20 02:59:47 -08001101 int current_idx_dmy[MAX_HWEVENTS];
David S. Miller59abbd12009-09-10 06:28:20 -07001102 const struct perf_event_map *pmap;
David S. Miller01552f72009-09-27 20:43:07 -07001103 int n;
David S. Miller59abbd12009-09-10 06:28:20 -07001104
1105 if (atomic_read(&nmi_active) < 0)
1106 return -ENODEV;
1107
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001108 switch (attr->type) {
1109 case PERF_TYPE_HARDWARE:
David S. Miller2ce4da22009-09-26 20:42:10 -07001110 if (attr->config >= sparc_pmu->max_events)
1111 return -EINVAL;
1112 pmap = sparc_pmu->event_map(attr->config);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001113 break;
1114
1115 case PERF_TYPE_HW_CACHE:
David S. Miller2ce4da22009-09-26 20:42:10 -07001116 pmap = sparc_map_cache_event(attr->config);
1117 if (IS_ERR(pmap))
1118 return PTR_ERR(pmap);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001119 break;
1120
1121 case PERF_TYPE_RAW:
Ingo Molnard0303d72010-09-23 08:02:09 +02001122 pmap = NULL;
1123 break;
David S. Miller59abbd12009-09-10 06:28:20 -07001124
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001125 default:
1126 return -ENOENT;
1127
1128 }
1129
David S. Millerb343ae52010-09-12 17:20:24 -07001130 if (pmap) {
1131 hwc->event_base = perf_event_encode(pmap);
1132 } else {
Ingo Molnard0303d72010-09-23 08:02:09 +02001133 /*
1134 * User gives us "(encoding << 16) | pic_mask" for
David S. Millerb343ae52010-09-12 17:20:24 -07001135 * PERF_TYPE_RAW events.
1136 */
1137 hwc->event_base = attr->config;
1138 }
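	/* For instance, a raw config of 0x00090001 (perf's "r90001")
	 * would request encoding 0x9 restricted to the upper counter
	 * (pic_mask 0x01); the value here is purely illustrative, not
	 * taken from any CPU manual.
	 */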
1139
David S. Millere7bef6b2010-01-20 02:59:47 -08001140 /* We save the enable bits in the config_base. */
David S. Miller496c07e2009-09-10 07:10:59 -07001141 hwc->config_base = sparc_pmu->irq_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001142 if (!attr->exclude_user)
1143 hwc->config_base |= PCR_UTRACE;
1144 if (!attr->exclude_kernel)
1145 hwc->config_base |= PCR_STRACE;
David S. Miller91b92862009-09-10 07:09:06 -07001146 if (!attr->exclude_hv)
1147 hwc->config_base |= sparc_pmu->hv_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001148
David S. Miller01552f72009-09-27 20:43:07 -07001149 n = 0;
1150 if (event->group_leader != event) {
1151 n = collect_events(event->group_leader,
Peter Zijlstra15ac9a32010-09-06 15:51:45 +02001152 MAX_HWEVENTS - 1,
David S. Millere7bef6b2010-01-20 02:59:47 -08001153 evts, events, current_idx_dmy);
David S. Miller01552f72009-09-27 20:43:07 -07001154 if (n < 0)
1155 return -EINVAL;
1156 }
David S. Millera72a8a52009-09-28 17:35:20 -07001157 events[n] = hwc->event_base;
David S. Miller01552f72009-09-27 20:43:07 -07001158 evts[n] = event;
1159
1160 if (check_excludes(evts, n, 1))
1161 return -EINVAL;
1162
David S. Millere7bef6b2010-01-20 02:59:47 -08001163 if (sparc_check_constraints(evts, events, n + 1))
David S. Millera72a8a52009-09-28 17:35:20 -07001164 return -EINVAL;
1165
David S. Millere7bef6b2010-01-20 02:59:47 -08001166 hwc->idx = PIC_NO_INDEX;
1167
David S. Miller01552f72009-09-27 20:43:07 -07001168 /* Try to do all error checking before this point, as unwinding
1169 * state after grabbing the PMC is difficult.
1170 */
1171 perf_event_grab_pmc();
1172 event->destroy = hw_perf_event_destroy;
1173
David S. Miller59abbd12009-09-10 06:28:20 -07001174 if (!hwc->sample_period) {
1175 hwc->sample_period = MAX_PERIOD;
1176 hwc->last_period = hwc->sample_period;
Peter Zijlstrae7850592010-05-21 14:43:08 +02001177 local64_set(&hwc->period_left, hwc->sample_period);
David S. Miller59abbd12009-09-10 06:28:20 -07001178 }
1179
David S. Miller59abbd12009-09-10 06:28:20 -07001180 return 0;
1181}
1182
Lin Minga13c3af2010-04-23 13:56:33 +08001183/*
1184 * Start group events scheduling transaction
1185 * Set the flag to make pmu::enable() not perform the
 1186 * schedulability test; it will be performed at commit time
1187 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001188static void sparc_pmu_start_txn(struct pmu *pmu)
Lin Minga13c3af2010-04-23 13:56:33 +08001189{
1190 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1191
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001192 perf_pmu_disable(pmu);
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001193 cpuhw->group_flag |= PERF_EVENT_TXN;
Lin Minga13c3af2010-04-23 13:56:33 +08001194}
1195
1196/*
1197 * Stop group events scheduling transaction
1198 * Clear the flag and pmu::enable() will perform the
1199 * schedulability test.
1200 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001201static void sparc_pmu_cancel_txn(struct pmu *pmu)
Lin Minga13c3af2010-04-23 13:56:33 +08001202{
1203 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1204
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001205 cpuhw->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001206 perf_pmu_enable(pmu);
Lin Minga13c3af2010-04-23 13:56:33 +08001207}
1208
1209/*
1210 * Commit group events scheduling transaction
1211 * Perform the group schedulability test as a whole
 1212 * Return 0 on success
1213 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001214static int sparc_pmu_commit_txn(struct pmu *pmu)
Lin Minga13c3af2010-04-23 13:56:33 +08001215{
1216 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1217 int n;
1218
1219 if (!sparc_pmu)
1220 return -EINVAL;
1221
1222 cpuc = &__get_cpu_var(cpu_hw_events);
1223 n = cpuc->n_events;
1224 if (check_excludes(cpuc->event, 0, n))
1225 return -EINVAL;
1226 if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1227 return -EAGAIN;
1228
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001229 cpuc->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001230 perf_pmu_enable(pmu);
Lin Minga13c3af2010-04-23 13:56:33 +08001231 return 0;
1232}
1233
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001234static struct pmu pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001235 .pmu_enable = sparc_pmu_enable,
1236 .pmu_disable = sparc_pmu_disable,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001237 .event_init = sparc_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001238 .add = sparc_pmu_add,
1239 .del = sparc_pmu_del,
1240 .start = sparc_pmu_start,
1241 .stop = sparc_pmu_stop,
David S. Miller59abbd12009-09-10 06:28:20 -07001242 .read = sparc_pmu_read,
Lin Minga13c3af2010-04-23 13:56:33 +08001243 .start_txn = sparc_pmu_start_txn,
1244 .cancel_txn = sparc_pmu_cancel_txn,
1245 .commit_txn = sparc_pmu_commit_txn,
David S. Miller59abbd12009-09-10 06:28:20 -07001246};
1247
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001248void perf_event_print_debug(void)
David S. Miller59abbd12009-09-10 06:28:20 -07001249{
1250 unsigned long flags;
1251 u64 pcr, pic;
1252 int cpu;
1253
1254 if (!sparc_pmu)
1255 return;
1256
1257 local_irq_save(flags);
1258
1259 cpu = smp_processor_id();
1260
1261 pcr = pcr_ops->read();
1262 read_pic(pic);
1263
1264 pr_info("\n");
1265 pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
1266 cpu, pcr, pic);
1267
1268 local_irq_restore(flags);
1269}
1270
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001271static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
David S. Millerd29862f2009-09-28 17:37:12 -07001272 unsigned long cmd, void *__args)
David S. Miller59abbd12009-09-10 06:28:20 -07001273{
1274 struct die_args *args = __args;
1275 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001276 struct cpu_hw_events *cpuc;
David S. Miller59abbd12009-09-10 06:28:20 -07001277 struct pt_regs *regs;
David S. Millere7bef6b2010-01-20 02:59:47 -08001278 int i;
David S. Miller59abbd12009-09-10 06:28:20 -07001279
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001280 if (!atomic_read(&active_events))
David S. Miller59abbd12009-09-10 06:28:20 -07001281 return NOTIFY_DONE;
1282
1283 switch (cmd) {
1284 case DIE_NMI:
1285 break;
1286
1287 default:
1288 return NOTIFY_DONE;
1289 }
1290
1291 regs = args->regs;
1292
Peter Zijlstradc1d6282010-03-03 15:55:04 +01001293 perf_sample_data_init(&data, 0);
David S. Miller59abbd12009-09-10 06:28:20 -07001294
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001295 cpuc = &__get_cpu_var(cpu_hw_events);
David S. Millere04ed382010-01-04 23:16:03 -08001296
1297 /* If the PMU has the TOE IRQ enable bits, we need to do a
1298 * dummy write to the %pcr to clear the overflow bits and thus
1299 * the interrupt.
1300 *
1301 * Do this before we peek at the counters to determine
1302 * overflow so we don't lose any events.
1303 */
1304 if (sparc_pmu->irq_bit)
1305 pcr_ops->write(cpuc->pcr);
1306
David S. Millere7bef6b2010-01-20 02:59:47 -08001307 for (i = 0; i < cpuc->n_events; i++) {
1308 struct perf_event *event = cpuc->event[i];
1309 int idx = cpuc->current_idx[i];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001310 struct hw_perf_event *hwc;
David S. Miller59abbd12009-09-10 06:28:20 -07001311 u64 val;
1312
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001313 hwc = &event->hw;
1314 val = sparc_perf_event_update(event, hwc, idx);
David S. Miller59abbd12009-09-10 06:28:20 -07001315 if (val & (1ULL << 31))
1316 continue;
1317
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001318 data.period = event->hw.last_period;
1319 if (!sparc_perf_event_set_period(event, hwc, idx))
David S. Miller59abbd12009-09-10 06:28:20 -07001320 continue;
1321
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001322 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001323 sparc_pmu_stop(event, 0);
David S. Miller59abbd12009-09-10 06:28:20 -07001324 }
1325
1326 return NOTIFY_STOP;
1327}
1328
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001329static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1330 .notifier_call = perf_event_nmi_handler,
David S. Miller59abbd12009-09-10 06:28:20 -07001331};
1332
1333static bool __init supported_pmu(void)
1334{
David S. Miller28e8f9b2009-09-26 20:54:22 -07001335 if (!strcmp(sparc_pmu_type, "ultra3") ||
1336 !strcmp(sparc_pmu_type, "ultra3+") ||
1337 !strcmp(sparc_pmu_type, "ultra3i") ||
1338 !strcmp(sparc_pmu_type, "ultra4+")) {
1339 sparc_pmu = &ultra3_pmu;
David S. Miller59abbd12009-09-10 06:28:20 -07001340 return true;
1341 }
David S. Miller7eebda62009-09-26 21:23:41 -07001342 if (!strcmp(sparc_pmu_type, "niagara")) {
1343 sparc_pmu = &niagara1_pmu;
1344 return true;
1345 }
David S. Millerb73d8842009-09-10 07:22:18 -07001346 if (!strcmp(sparc_pmu_type, "niagara2")) {
1347 sparc_pmu = &niagara2_pmu;
1348 return true;
1349 }
David S. Miller59abbd12009-09-10 06:28:20 -07001350 return false;
1351}
1352
Peter Zijlstra004417a2010-11-25 18:38:29 +01001353int __init init_hw_perf_events(void)
David S. Miller59abbd12009-09-10 06:28:20 -07001354{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001355 pr_info("Performance events: ");
David S. Miller59abbd12009-09-10 06:28:20 -07001356
1357 if (!supported_pmu()) {
1358 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001359 return 0;
David S. Miller59abbd12009-09-10 06:28:20 -07001360 }
1361
1362 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1363
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001364 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001365 register_die_notifier(&perf_event_nmi_notifier);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001366
1367 return 0;
David S. Miller59abbd12009-09-10 06:28:20 -07001368}
Ingo Molnarefc70d22010-12-10 00:27:23 +01001369early_initcall(init_hw_perf_events);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001370
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001371void perf_callchain_kernel(struct perf_callchain_entry *entry,
1372 struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001373{
1374 unsigned long ksp, fp;
David S. Miller667f0ce2010-04-21 03:08:11 -07001375#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1376 int graph = 0;
1377#endif
David S. Miller4f6dbe42010-01-19 00:26:13 -08001378
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001379 stack_trace_flush();
1380
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001381 perf_callchain_store(entry, regs->tpc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001382
1383 ksp = regs->u_regs[UREG_I6];
1384 fp = ksp + STACK_BIAS;
1385 do {
1386 struct sparc_stackf *sf;
1387 struct pt_regs *regs;
1388 unsigned long pc;
1389
1390 if (!kstack_valid(current_thread_info(), fp))
1391 break;
1392
1393 sf = (struct sparc_stackf *) fp;
1394 regs = (struct pt_regs *) (sf + 1);
1395
1396 if (kstack_is_trap_frame(current_thread_info(), regs)) {
1397 if (user_mode(regs))
1398 break;
1399 pc = regs->tpc;
1400 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1401 } else {
1402 pc = sf->callers_pc;
1403 fp = (unsigned long)sf->fp + STACK_BIAS;
1404 }
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001405 perf_callchain_store(entry, pc);
David S. Miller667f0ce2010-04-21 03:08:11 -07001406#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1407 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
1408 int index = current->curr_ret_stack;
1409 if (current->ret_stack && index >= graph) {
1410 pc = current->ret_stack[index - graph].ret;
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001411 perf_callchain_store(entry, pc);
David S. Miller667f0ce2010-04-21 03:08:11 -07001412 graph++;
1413 }
1414 }
1415#endif
David S. Miller4f6dbe42010-01-19 00:26:13 -08001416 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1417}
1418
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001419static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1420 struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001421{
1422 unsigned long ufp;
1423
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001424 perf_callchain_store(entry, regs->tpc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001425
1426 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1427 do {
1428 struct sparc_stackf *usf, sf;
1429 unsigned long pc;
1430
1431 usf = (struct sparc_stackf *) ufp;
1432 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1433 break;
1434
1435 pc = sf.callers_pc;
1436 ufp = (unsigned long)sf.fp + STACK_BIAS;
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001437 perf_callchain_store(entry, pc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001438 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1439}
1440
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001441static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1442 struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001443{
1444 unsigned long ufp;
1445
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001446 perf_callchain_store(entry, regs->tpc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001447
David S. Miller9e8307e2010-03-29 13:08:52 -07001448 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
David S. Miller4f6dbe42010-01-19 00:26:13 -08001449 do {
1450 struct sparc_stackf32 *usf, sf;
1451 unsigned long pc;
1452
1453 usf = (struct sparc_stackf32 *) ufp;
1454 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1455 break;
1456
1457 pc = sf.callers_pc;
1458 ufp = (unsigned long)sf.fp;
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001459 perf_callchain_store(entry, pc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001460 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1461}
1462
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001463void
1464perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001465{
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001466 flushw_user();
1467 if (test_thread_flag(TIF_32BIT))
1468 perf_callchain_user_32(entry, regs);
1469 else
1470 perf_callchain_user_64(entry, regs);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001471}