/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/perfctr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32 bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
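
/* An illustrative sketch (not part of the driver): with this layout,
 * idling just the upper counter means rewriting only its event field
 * in the shared control register, e.g., with hypothetical names:
 *
 *	pcr &= ~(event_mask << upper_shift);
 *	pcr |= sw_count0_encoding << upper_shift;
 *
 * which is what sparc_pmu_disable_event() below effectively does via
 * mask_for_index() and nop_for_index().
 */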

#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register on this cpu. */
	u64			pcr;

	/* Enabled/disabled state. */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
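
/* For example, the ultra3 L1D read-miss entry below, { 0x09, PIC_UPPER },
 * encodes to (0x09 << 16) | 0x01 == 0x00090001; perf_event_get_enc()
 * recovers 0x09 and perf_event_get_msk() recovers PIC_UPPER.
 */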

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free-running, which creates all kinds
 * of problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
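
/* For instance, with the ultra3 settings above (event_mask 0x3f,
 * upper_shift 11, upper_nop 0x1c), mask_for_index(PIC_UPPER_INDEX)
 * yields 0x3f << 11 and nop_for_index(PIC_UPPER_INDEX) yields
 * 0x1c << 11, i.e. the "nop" event programmed into an idle counter.
 */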

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 enc, val, mask = mask_for_index(idx);

	enc = perf_event_get_enc(cpuc->events[idx]);

	val = cpuc->pcr;
	val &= ~mask;
	val |= event_encoding(enc, idx);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
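
/* Layout of the %pic register implied by the two helpers above:
 * bits 63:32 hold the upper counter (PIC_UPPER_INDEX) and bits 31:0
 * the lower counter (PIC_LOWER_INDEX).  read_pmc() extracts one half;
 * write_pmc() rewrites one half while preserving the other.
 */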

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

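	/* Subtracting the two counts after shifting both into the top
	 * 32 bits, then arithmetically shifting back down, sign-extends
	 * the 32-bit hardware delta, so the result stays correct even
	 * if the counter wrapped between the two readings.
	 */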
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
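/*
 * For example, if an event had been counting on the lower PIC but its
 * pending event->hw.idx now points at the upper one, the first loop
 * below snapshots its count at the old slot and marks it unassigned,
 * and the second loop reprograms it at the new index.
 */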
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			pcr |= nop_for_index(idx);
		else
			pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		sparc_perf_event_set_period(event, &event->hw, idx);
	}

	event->hw.state = 0;

	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
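/*
 * For example, scheduling msk0 == PIC_UPPER against
 * msk1 == (PIC_UPPER | PIC_LOWER) leaves idx0 at 0, so the fixed
 * event lands on the upper counter and its flexible sibling takes
 * the lower one (idx0 ^ 1).
 */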
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > MAX_HWEVENTS)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict. */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}

static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= MAX_HWEVENTS)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	event->hw.state = PERF_HES_UPTODATE;
	if (!(ef_flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;

	/*
	 * If a group-event scheduling transaction was started, skip the
	 * schedulability test here; it will be performed at commit time
	 * (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

static int sparc_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
		break;

	case PERF_TYPE_RAW:
		pmap = NULL;
		break;

	default:
		return -ENOENT;

	}

	if (pmap) {
		hwc->event_base = perf_event_encode(pmap);
	} else {
		/*
		 * User gives us "(encoding << 16) | pic_mask" for
		 * PERF_TYPE_RAW events.
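		 *
		 * For example, assuming the Niagara2 encodings above, a
		 * raw config of 0x02ff0003 would request event 0x2ff,
		 * schedulable on either PIC.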
		 */
		hwc->event_base = attr->config;
	}

	/* We save the enable bits in the config_base. */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   MAX_HWEVENTS - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Start a group-event scheduling transaction.  Set the flag to make
 * pmu::enable() skip the schedulability test; it will be performed
 * at commit time.
 */
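/*
 * A rough sketch of how the perf core drives these hooks (core-side
 * pseudocode, not driver code):
 *
 *	pmu->start_txn(pmu);
 *	...pmu->add() for each event in the group (checks skipped)...
 *	if (pmu->commit_txn(pmu))
 *		pmu->cancel_txn(pmu);
 */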
static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Cancel a group-event scheduling transaction.  Clear the flag so
 * pmu::enable() will perform the schedulability test again.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit a group-event scheduling transaction.  Perform the group
 * schedulability test as a whole; return 0 on success.
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

static struct pmu pmu = {
	.pmu_enable	= sparc_pmu_enable,
	.pmu_disable	= sparc_pmu_disable,
	.event_init	= sparc_pmu_event_init,
	.add		= sparc_pmu_add,
	.del		= sparc_pmu_del,
	.start		= sparc_pmu_start,
	.stop		= sparc_pmu_stop,
	.read		= sparc_pmu_read,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			sparc_pmu_stop(event, 0);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call	= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2") ||
	    !strcmp(sparc_pmu_type, "niagara3")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return 0;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	register_die_notifier(&perf_event_nmi_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

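	/* The saved %sp on sparc64 is biased by 2047 bytes (STACK_BIAS);
	 * add it back to recover the true frame pointer.
	 */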
	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	perf_callchain_store(entry, regs->tpc);

	if (!current->mm)
		return;

	flushw_user();
	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);
}