/* Performance counter support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf counter
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_counter.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32 bits each, with
 * overflow interrupts generated on the transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWCOUNTERS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1

#define PIC_UPPER_NOP			0x1c
#define PIC_LOWER_NOP			0x14

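/* Per-cpu bookkeeping for the counters currently scheduled onto the
 * hardware.  'used_mask' tracks which counter slots have been claimed,
 * 'active_mask' tracks which of those are actually counting, and
 * 'enabled' mirrors whether the PCR sampling enables are turned on.
 */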
struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HWCOUNTERS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	int			enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };

struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
};

static const struct perf_event_map ultra3i_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3i_event_map(int event)
{
	return &ultra3i_perfmon_event_map[event];
}

static const struct sparc_pmu ultra3i_pmu = {
	.event_map	= ultra3i_event_map,
	.max_events	= ARRAY_SIZE(ultra3i_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

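/* Shift a raw event encoding (or mask) into the PCR field of the
 * selected counter, using the per-PMU upper_shift/lower_shift values
 * (11 and 4 respectively for the ultra3i PMU above).
 */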
static u64 event_encoding(u64 event, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event <<= sparc_pmu->upper_shift;
	else
		event <<= sparc_pmu->lower_shift;
	return event;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      PIC_UPPER_NOP : PIC_LOWER_NOP, idx);
}

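/* Enabling a counter writes its event encoding into that counter's PCR
 * event field; disabling writes the software-count "NOP" event instead,
 * since the hardware has no per-counter enable bit (see the comment at
 * the top of this file).
 */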
static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
					    int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
					     int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}

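/* hw_perf_enable()/hw_perf_disable() gate all sampling at once:
 * enable ORs the saved config_base bits (the user/supervisor/hypervisor
 * trace enables plus the overflow interrupt bit) of every scheduled
 * counter into the PCR, disable clears those bits for the whole
 * register.
 */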
void hw_perf_enable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	val = pcr_ops->read();

	for (i = 0; i < MAX_HWCOUNTERS; i++) {
		struct perf_counter *cp = cpuc->counters[i];
		struct hw_perf_counter *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}

	pcr_ops->write(val);
}

void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	pcr_ops->write(val);
}

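/* The two 32-bit counters share one 64-bit PIC register: the upper
 * counter is bits 63:32, the lower counter bits 31:0.  Updating one
 * counter therefore requires a read-modify-write of the whole PIC.
 */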
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}

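/* Program the counter so that it overflows after 'left' more events.
 * The hardware interrupts on the 0xffffffff -> 0 transition, so the
 * counter is loaded with the 32-bit two's complement of 'left'.  For
 * example, left = 1000 would be written as 0xfffffc18, which wraps to
 * zero after exactly 1000 counted events.
 */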
static int sparc_perf_counter_set_period(struct perf_counter *counter,
					 struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_counter_update_userpage(counter);

	return ret;
}

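/* Schedule a counter onto the hardware: claim its slot in used_mask,
 * quiesce that slot with the NOP event, then program the sample period
 * and the real event encoding.
 */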
static int sparc_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_counter(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_counter_set_period(counter, hwc, idx);
	sparc_pmu_enable_counter(hwc, idx);
	perf_counter_update_userpage(counter);
	return 0;
}

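/* Fold the delta since the last read into the generic counter state.
 * The cmpxchg loop retries if an NMI updated prev_count underneath us,
 * and the shifts confine the subtraction to the 32 counter bits so the
 * delta is computed modulo 2^32.
 */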
static u64 sparc_perf_counter_update(struct perf_counter *counter,
				     struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void sparc_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_counter(hwc, idx);

	barrier();

	sparc_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_counter_update_userpage(counter);
}

static void sparc_pmu_read(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	sparc_perf_counter_update(counter, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	sparc_pmu_enable_counter(hwc, hwc->idx);
}

static atomic_t active_counters = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

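/* The performance counter hardware is shared with the NMI watchdog,
 * which also programs the PCR.  The first active counter shuts the
 * watchdog down, and the last counter to go away lets it restart.
 */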
void perf_counter_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_counters))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_counters) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_counters);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_counter_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	perf_counter_release_pmc();
}

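/* Validate the requested event, grab the PMC hardware away from the NMI
 * watchdog, and translate the generic attributes into a PCR event
 * encoding ('config') plus context enable bits ('config_base') for the
 * counter the event maps onto.
 */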
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	const struct perf_event_map *pmap;
	u64 enc;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;

	if (attr->config >= sparc_pmu->max_events)
		return -EINVAL;

	perf_counter_grab_pmc();
	counter->destroy = hw_perf_counter_destroy;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	pmap = sparc_pmu->event_map(attr->config);

	enc = pmap->encoding;
	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}

static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err = __hw_perf_counter_init(counter);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

void perf_counter_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

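/* Counter overflow arrives as an NMI.  Every active slot is re-checked
 * here: a counter whose value still has bit 31 set is taken to be
 * climbing up towards zero from its negative start value and is
 * skipped, the rest are given a fresh period and reported via
 * perf_counter_overflow().
 */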
static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
					      unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	data.regs = regs;
	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
		struct perf_counter *counter = cpuc->counters[idx];
		struct hw_perf_counter *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &counter->hw;
		val = sparc_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = counter->hw.last_period;
		if (!sparc_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			sparc_pmu_disable_counter(hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
};

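/* Match the PMU description against the sparc_pmu_type string set up by
 * the cpu probe code; only the "ultra3i" type is recognized here.
 */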
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3i")) {
		sparc_pmu = &ultra3i_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_counters(void)
{
	pr_info("Performance counters: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 counters.  But this simple
	 * driver only supports one active counter at a time.
	 */
	perf_max_counters = 1;

	register_die_notifier(&perf_counter_nmi_notifier);
}