| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 1 | /* pcr.c: Generic sparc64 performance counter infrastructure. | 
 | 2 |  * | 
 | 3 |  * Copyright (C) 2009 David S. Miller (davem@davemloft.net) | 
 | 4 |  */ | 
 | 5 | #include <linux/kernel.h> | 
 | 6 | #include <linux/module.h> | 
 | 7 | #include <linux/init.h> | 
 | 8 | #include <linux/irq.h> | 
 | 9 |  | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 10 | #include <linux/irq_work.h> | 
| David S. Miller | 9960e9e | 2010-04-07 04:41:33 -0700 | [diff] [blame] | 11 | #include <linux/ftrace.h> | 
| David S. Miller | 5686f9c | 2009-09-10 05:59:24 -0700 | [diff] [blame] | 12 |  | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 13 | #include <asm/pil.h> | 
 | 14 | #include <asm/pcr.h> | 
| David S. Miller | e5553a6 | 2009-01-29 21:22:47 -0800 | [diff] [blame] | 15 | #include <asm/nmi.h> | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 16 |  | 
 | 17 | /* This code is shared between various users of the performance | 
 | 18 |  * counters.  Users will be oprofile, pseudo-NMI watchdog, and the | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 19 |  * perf_event support layer. | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 20 |  */ | 
 | 21 |  | 
/* Canned PCR "enable" settings for the two supported PCR flavors.
 * Bit definitions come from asm/pcr.h; the Niagara-2 variant also
 * sets the TOE_OV1 overflow-trap-enable bit and loads the SL1/MASK1
 * event selection fields.
 */
#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))

/* Chosen per-cpu-type in pcr_arch_init(): pcr_enable is one of the
 * *_ENABLE values above; picl_shift is set (to 2) only on sun4v.
 */
u64 pcr_enable;
unsigned int picl_shift;
 | 30 |  | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 31 | /* Performance counter interrupts run unmasked at PIL level 15. | 
 | 32 |  * Therefore we can't do things like wakeups and other work | 
 | 33 |  * that expects IRQ disabling to be adhered to in locking etc. | 
 | 34 |  * | 
 | 35 |  * Therefore in such situations we defer the work by signalling | 
 | 36 |  * a lower level cpu IRQ. | 
 | 37 |  */ | 
/* Softint handler for work deferred from the PIL-15 perf counter
 * interrupt (see comment above): drains the generic irq_work queue
 * in a normal IRQ context where locking rules hold again.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/* Ack our softint bit before running the queued work. */
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
 | 52 |  | 
/* Arch hook for the generic irq_work layer: raise the softint that
 * fires deferred_pcr_work_irq().
 */
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
 | 57 |  | 
/* Active PCR access method, selected in pcr_arch_init(); exported
 * for the perf counter users of this file (oprofile, NMI watchdog,
 * perf_event).
 */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
 | 60 |  | 
 | 61 | static u64 direct_pcr_read(void) | 
 | 62 | { | 
 | 63 | 	u64 val; | 
 | 64 |  | 
 | 65 | 	read_pcr(val); | 
 | 66 | 	return val; | 
 | 67 | } | 
 | 68 |  | 
/* Write %pcr with a direct privileged access (sun4u). */
static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}
 | 73 |  | 
/* sun4u (cheetah/cheetah_plus): both directions go straight at %pcr. */
static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};
 | 78 |  | 
 | 79 | static void n2_pcr_write(u64 val) | 
 | 80 | { | 
 | 81 | 	unsigned long ret; | 
 | 82 |  | 
 | 83 | 	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val); | 
 | 84 | 	if (val != HV_EOK) | 
 | 85 | 		write_pcr(val); | 
 | 86 | } | 
 | 87 |  | 
/* Niagara-2: reads are direct; writes go through the hypervisor
 * service with a direct-write fallback (see n2_pcr_write).
 */
static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};
 | 92 |  | 
/* Hypervisor perf API negotiation state; only meaningful on sun4v,
 * filled in by register_perf_hsvc().
 */
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
 | 96 |  | 
 | 97 | static int __init register_perf_hsvc(void) | 
 | 98 | { | 
 | 99 | 	if (tlb_type == hypervisor) { | 
 | 100 | 		switch (sun4v_chip_type) { | 
 | 101 | 		case SUN4V_CHIP_NIAGARA1: | 
 | 102 | 			perf_hsvc_group = HV_GRP_NIAG_PERF; | 
 | 103 | 			break; | 
 | 104 |  | 
 | 105 | 		case SUN4V_CHIP_NIAGARA2: | 
 | 106 | 			perf_hsvc_group = HV_GRP_N2_CPU; | 
 | 107 | 			break; | 
 | 108 |  | 
 | 109 | 		default: | 
 | 110 | 			return -ENODEV; | 
 | 111 | 		} | 
 | 112 |  | 
 | 113 |  | 
 | 114 | 		perf_hsvc_major = 1; | 
 | 115 | 		perf_hsvc_minor = 0; | 
 | 116 | 		if (sun4v_hvapi_register(perf_hsvc_group, | 
 | 117 | 					 perf_hsvc_major, | 
 | 118 | 					 &perf_hsvc_minor)) { | 
 | 119 | 			printk("perfmon: Could not register hvapi.\n"); | 
 | 120 | 			return -ENODEV; | 
 | 121 | 		} | 
 | 122 | 	} | 
 | 123 | 	return 0; | 
 | 124 | } | 
 | 125 |  | 
 | 126 | static void __init unregister_perf_hsvc(void) | 
 | 127 | { | 
 | 128 | 	if (tlb_type != hypervisor) | 
 | 129 | 		return; | 
 | 130 | 	sun4v_hvapi_unregister(perf_hsvc_group); | 
 | 131 | } | 
 | 132 |  | 
 | 133 | int __init pcr_arch_init(void) | 
 | 134 | { | 
 | 135 | 	int err = register_perf_hsvc(); | 
 | 136 |  | 
 | 137 | 	if (err) | 
 | 138 | 		return err; | 
 | 139 |  | 
 | 140 | 	switch (tlb_type) { | 
 | 141 | 	case hypervisor: | 
 | 142 | 		pcr_ops = &n2_pcr_ops; | 
| David S. Miller | e5553a6 | 2009-01-29 21:22:47 -0800 | [diff] [blame] | 143 | 		pcr_enable = PCR_N2_ENABLE; | 
 | 144 | 		picl_shift = 2; | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 145 | 		break; | 
 | 146 |  | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 147 | 	case cheetah: | 
 | 148 | 	case cheetah_plus: | 
 | 149 | 		pcr_ops = &direct_pcr_ops; | 
| David S. Miller | e5553a6 | 2009-01-29 21:22:47 -0800 | [diff] [blame] | 150 | 		pcr_enable = PCR_SUN4U_ENABLE; | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 151 | 		break; | 
 | 152 |  | 
| David S. Miller | 1c2f61d | 2009-02-05 23:59:04 -0800 | [diff] [blame] | 153 | 	case spitfire: | 
 | 154 | 		/* UltraSPARC-I/II and derivatives lack a profile | 
 | 155 | 		 * counter overflow interrupt so we can't make use of | 
 | 156 | 		 * their hardware currently. | 
 | 157 | 		 */ | 
 | 158 | 		/* fallthrough */ | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 159 | 	default: | 
 | 160 | 		err = -ENODEV; | 
 | 161 | 		goto out_unregister; | 
 | 162 | 	} | 
 | 163 |  | 
| David S. Miller | e5553a6 | 2009-01-29 21:22:47 -0800 | [diff] [blame] | 164 | 	return nmi_init(); | 
| David S. Miller | 3eb8057 | 2009-01-21 21:30:23 -0800 | [diff] [blame] | 165 |  | 
 | 166 | out_unregister: | 
 | 167 | 	unregister_perf_hsvc(); | 
 | 168 | 	return err; | 
 | 169 | } | 
 | 170 |  | 
 | 171 | arch_initcall(pcr_arch_init); |