/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/spitfire.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

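/* PCR "enable" settings used while counting: keep %pic access
 * privileged and count in both system and user mode.  The Niagara-2
 * value additionally sets an overflow trap enable bit and programs
 * the SL1/MASK1 event selection fields.
 */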
#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))

u64 pcr_enable;
unsigned int picl_shift;

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}

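/* Entry point used by the generic irq_work layer: raise the
 * deferred-work soft interrupt so that deferred_pcr_work_irq()
 * runs once we drop back below PIL 15.
 */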
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

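/* Per-cpu-type accessors for the counter registers, selected at
 * boot time by pcr_arch_init() below.
 */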
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

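/* The "direct" implementation reads and writes %pcr and %pic with
 * plain rd/wr instructions, as used on sun4u (cheetah/cheetah_plus)
 * chips where the kernel accesses the registers itself.
 */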
static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}

static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}

static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}

static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt	%%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align	64\n"
			     "99:wr	%0, 0x0, %%pic\n\t"
			     "rd	%%pic, %%g0" : : "r" (val));
}

static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
};

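/* On Niagara-2, a PCR write that requests hyperprivileged tracing
 * (PCR_N2_HTRACE) is routed through the sun4v_niagara2_setperf()
 * hypervisor service; if that call fails, or for ordinary writes,
 * we fall back to a direct register write.
 */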
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
	unsigned long ret;

	WARN_ON_ONCE(reg_num != 0);
	if (val & PCR_N2_HTRACE) {
		ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
		if (ret != HV_EOK)
			direct_pcr_write(reg_num, val);
	} else
		direct_pcr_write(reg_num, val);
}

static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
};

static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

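/* Negotiate the performance counter hypervisor API group that
 * matches the detected sun4v chip.  On non-hypervisor (sun4u)
 * systems there is nothing to register.
 */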
static int __init register_perf_hsvc(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		case SUN4V_CHIP_NIAGARA3:
			perf_hsvc_group = HV_GRP_KT_CPU;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		if (sun4v_hvapi_register(perf_hsvc_group,
					 perf_hsvc_major,
					 &perf_hsvc_minor)) {
			printk(KERN_ERR "perfmon: Could not register hvapi.\n");
			return -ENODEV;
		}
	}
	return 0;
}

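/* Undo register_perf_hsvc() on the error path in pcr_arch_init() below. */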
static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

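/* Pick the pcr_ops implementation and PCR enable bits for the
 * running cpu type, then hand off to the NMI watchdog setup.
 */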
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}