/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register. The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits. But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters. It's thus nearly impossible to have one counter going
 * while keeping the other one stopped. Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events. These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
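
/* The chip specific encodings for these "nop" events live in the
 * upper_nop/lower_nop fields of struct sparc_pmu below (e.g. 0x1c and
 * 0x14 on UltraSPARC-III, 0x220 on Niagara-2); nop_for_index() shifts
 * the right one into the chosen counter's event field.
 */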

#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on. See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event. When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register on this cpu. */
	u64			pcr;

	/* Enabled/disabled state. */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event. In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
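
/* Example of the encode/decode round trip: the ultra3 map entry for
 * PERF_COUNT_HW_CACHE_MISSES below is { 0x0009, PIC_UPPER }, which
 * perf_event_encode() turns into 0x00090001; perf_event_get_enc()
 * recovers 0x9 and perf_event_get_msk() recovers PIC_UPPER (0x01).
 */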

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
};

/* Niagara1 is very limited. The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems. Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}
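
/* For example, with the ultra3 settings above (upper_shift = 11,
 * lower_shift = 4), an event code of 0x09 becomes 0x4800 when placed
 * in the upper counter's field of the %pcr image and 0x90 when placed
 * in the lower counter's field.
 */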

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
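
/* read_pmc() and write_pmc() go through the single 64-bit %pic
 * register: the upper counter lives in bits 63:32 and the lower
 * counter in bits 31:0, so one half is selected with a shift while
 * the other half is preserved with a mask on writes.
 */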

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
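
/* The shift-by-32 dance above computes (new_raw_count - prev_raw_count)
 * modulo 2^32 as a signed 64-bit delta, so the accumulated count stays
 * correct even when the 32-bit hardware counter has wrapped between
 * two reads.
 */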

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
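
/* The counter is programmed with the 32-bit two's complement of "left"
 * so that, after "left" more events, it wraps from 0xffffffff to 0 and
 * raises the overflow interrupt described at the top of this file.
 */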

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

static void sparc_pmu_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}

static void sparc_pmu_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			int idx = cpuc->current_idx[i];

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_disable_event(cpuc, hwc, idx);
			barrier();
			sparc_perf_event_update(event, hwc, idx);

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
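
/* The generic cache event config is three packed bytes: bits 7:0 give
 * the cache (e.g. L1D), bits 15:8 the operation (read/write/prefetch)
 * and bits 23:16 the result (access/miss), which is exactly how the
 * cache_map_t tables above are indexed. An L1D read miss, for
 * instance, is config 0x10000.
 */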

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time. This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success. These are pending indexes. When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > perf_max_events)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict. */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}
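
/* Example: if events[0] can only use PIC_LOWER and events[1] can use
 * either counter, idx0 ends up as 1, so events[0] is given
 * PIC_LOWER_INDEX and events[1] gets the remaining counter via
 * idx0 ^ 1.
 */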

static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= perf_max_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

static int sparc_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
		break;

	case PERF_TYPE_RAW:
		return -EOPNOTSUPP;

	default:
		return -ENOENT;

	}

	/* We save the enable bits in the config_base. */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	hwc->event_base = perf_event_encode(pmap);

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

static struct pmu pmu = {
	.pmu_enable	= sparc_pmu_pmu_enable,
	.pmu_disable	= sparc_pmu_pmu_disable,
	.event_init	= sparc_pmu_event_init,
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

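/* Overflow detection below relies on the way counters are programmed
 * in sparc_perf_event_set_period(): each active counter is loaded with
 * -left, so (for the usual case of a period below 2^31) bit 31 of the
 * freshly read value stays set until the counter wraps. A counter
 * whose updated value still has bit 31 set has therefore not
 * overflowed and is skipped in the loop.
 */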
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(cpuc, hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events. */
	perf_max_events = 2;

	perf_pmu_register(&pmu);
	register_die_notifier(&perf_event_nmi_notifier);
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	flushw_user();
	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);
}