/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/spitfire.h>
/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
/* Handler for the PIL_DEFERRED_PCR_WORK softint raised by
 * arch_irq_work_raise().  Runs pending irq_work in normal (maskable)
 * IRQ context on behalf of the unmaskable PIL-15 perf counter handler.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/* Ack the softint first so it does not immediately re-fire. */
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
44
/* irq_work arch hook: schedule deferred_pcr_work_irq() by raising the
 * lower-priority PIL_DEFERRED_PCR_WORK software interrupt.
 */
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
49
/* CPU-model-specific PCR/PIC accessors, selected in pcr_arch_init(). */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
52
/* Read %pcr directly.  These direct accessors only support a single
 * performance control register, hence reg_num must be 0.
 */
static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}
61
/* Write %pcr directly.  Only reg_num 0 is supported. */
static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}
67
/* Read the %pic (performance instrumentation counter) register.
 * Only reg_num 0 is supported.
 */
static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}
76
/* Write the %pic register.  Only reg_num 0 is supported.
 *
 * NOTE: the branch/alignment and trailing read-back form the errata
 * workaround -- do not "simplify" this sequence.
 */
static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround. See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align 64\n"
			     "99:wr %0, 0x0, %%pic\n\t"
			     "rd %%pic, %%g0" : : "r" (val));
}
91
David S. Miller73a6b052012-08-16 23:26:01 -070092static u64 direct_picl_value(unsigned int nmi_hz)
93{
94 u32 delta = local_cpu_data().clock_tick / nmi_hz;
95
96 return ((u64)((0 - delta) & 0xffffffff)) << 32;
97}
98
/* Accessors for CPUs with a directly kernel-writable %pcr; installed
 * for the cheetah/cheetah_plus cases in pcr_arch_init().
 */
static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= direct_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
108
David S. Miller0bab20b2012-08-16 21:16:22 -0700109static void n2_pcr_write(unsigned long reg_num, u64 val)
David S. Miller3eb80572009-01-21 21:30:23 -0800110{
111 unsigned long ret;
112
David S. Miller0bab20b2012-08-16 21:16:22 -0700113 WARN_ON_ONCE(reg_num != 0);
David S. Miller314ff522011-07-27 20:46:25 -0700114 if (val & PCR_N2_HTRACE) {
115 ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
116 if (ret != HV_EOK)
David S. Miller09d053c2012-08-16 23:19:32 -0700117 direct_pcr_write(reg_num, val);
David S. Miller314ff522011-07-27 20:46:25 -0700118 } else
David S. Miller09d053c2012-08-16 23:19:32 -0700119 direct_pcr_write(reg_num, val);
David S. Miller3eb80572009-01-21 21:30:23 -0800120}
121
David S. Miller73a6b052012-08-16 23:26:01 -0700122static u64 n2_picl_value(unsigned int nmi_hz)
123{
124 u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
125
126 return ((u64)((0 - delta) & 0xffffffff)) << 32;
127}
128
/* Accessors used when running under the sun4v hypervisor; installed
 * for the hypervisor case in pcr_arch_init().
 */
static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= n2_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
				   PCR_N2_TOE_OV1 |
				   (2 << PCR_N2_SL1_SHIFT) |
				   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
141
/* sun4v hypervisor-services API group/version negotiated in
 * register_perf_hsvc(); the group is kept so unregister_perf_hsvc()
 * can tear it down.
 */
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
145
146static int __init register_perf_hsvc(void)
147{
148 if (tlb_type == hypervisor) {
149 switch (sun4v_chip_type) {
150 case SUN4V_CHIP_NIAGARA1:
151 perf_hsvc_group = HV_GRP_NIAG_PERF;
152 break;
153
154 case SUN4V_CHIP_NIAGARA2:
155 perf_hsvc_group = HV_GRP_N2_CPU;
156 break;
157
David S. Miller4ba991d2011-07-27 21:06:16 -0700158 case SUN4V_CHIP_NIAGARA3:
159 perf_hsvc_group = HV_GRP_KT_CPU;
160 break;
161
David S. Miller3eb80572009-01-21 21:30:23 -0800162 default:
163 return -ENODEV;
164 }
165
166
167 perf_hsvc_major = 1;
168 perf_hsvc_minor = 0;
169 if (sun4v_hvapi_register(perf_hsvc_group,
170 perf_hsvc_major,
171 &perf_hsvc_minor)) {
172 printk("perfmon: Could not register hvapi.\n");
173 return -ENODEV;
174 }
175 }
176 return 0;
177}
178
179static void __init unregister_perf_hsvc(void)
180{
181 if (tlb_type != hypervisor)
182 return;
183 sun4v_hvapi_unregister(perf_hsvc_group);
184}
185
/* Select the pcr_ops implementation for this CPU model and bring up
 * the NMI watchdog.  Returns 0 on success or a negative errno;
 * unsupported CPUs (e.g. spitfire) yield -ENODEV.
 */
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	/* Drop the hvapi group registered above on failure. */
	unregister_perf_hsvc();
	return err;
}