/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Sparc64 chips have two performance counters, 32 bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu.  */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of the %pcr register on this cpu.  */
	u64			pcr;

	/* Enabled/disabled state.  */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long.  */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
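
/* Illustration (not part of the original code): with the ultra3 L1D
 * read-miss entry { .encoding = 0x09, .pic_mask = PIC_UPPER } defined
 * below, perf_event_encode() yields (0x09 << 16) | 0x01 == 0x00090001,
 * from which perf_event_get_msk() recovers 0x01 and
 * perf_event_get_enc() recovers 0x09.
 */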

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free-running, which creates all kinds
 * of problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
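
/* Worked example (illustrative, using the ultra3 values above:
 * upper_shift == 11, event_mask == 0x3f, upper_nop == 0x1c):
 *
 *   mask_for_index(PIC_UPPER_INDEX) == 0x3f << 11 == 0x1f800
 *   nop_for_index(PIC_UPPER_INDEX)  == 0x1c << 11 == 0x0e000
 *
 * Each counter thus owns a disjoint event field within %pcr, which is
 * what lets the enable/disable helpers below rewrite one counter's
 * encoding without disturbing the other's.
 */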

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
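
/* Because both 32-bit counters live in the single 64-bit %pic
 * register, write_pmc() has to do a full read-modify-write so that
 * the counter it is not targeting is left undisturbed.
 */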

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
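
/* The shifts by (64 - 32) above confine the delta computation to the
 * low 32 bits, i.e. the width of a PIC counter, so the subtraction
 * wraps correctly modulo 2^32 when the hardware counter rolls over
 * between two reads.
 */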

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
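
/* Programming the counter with the low 32 bits of -left starts it at
 * 0x100000000 - left, so the transition from 0xffffffff to 0 (and
 * hence the overflow interrupt) occurs after exactly 'left' more
 * events, matching the overflow behavior described at the top of
 * this file.
 */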

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_disable();

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			int idx = cpuc->current_idx[i];

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_disable_event(cpuc, hwc, idx);
			barrier();
			sparc_perf_event_update(event, hwc, idx);

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_enable();
	local_irq_restore(flags);
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
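
/* The performance counters are shared with the NMI watchdog, so the
 * watchdog is stopped on all cpus (saving each cpu's %pcr value) the
 * first time an event grabs the PMC, and restarted once the last
 * event releases it.
 */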

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
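
/* Example (illustrative): a PERF_TYPE_HW_CACHE request for L1D read
 * misses arrives from the generic perf layer as
 *
 *   config = (C(RESULT_MISS) << 16) | (C(OP_READ) << 8) | C(L1D)
 *
 * which the unpacking above routes to
 * (*cache_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)].
 */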

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > perf_max_events)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK.  */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK.  */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict.  */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}
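
/* For example (illustrative): on ultra3, PERF_COUNT_HW_CACHE_REFERENCES
 * is PIC_LOWER-only and PERF_COUNT_HW_CACHE_MISSES is PIC_UPPER-only,
 * so scheduling that pair hits the "fixed to different counters" case
 * above, while two PIC_UPPER-only events would fall through to the
 * conflict return.
 */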

static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	n0 = cpuc->n_events;
	if (n0 >= perf_max_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	/*
	 * If a group event scheduling transaction was started, skip
	 * the schedulability test here; it will be performed at
	 * commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1028 | static int __hw_perf_event_init(struct perf_event *event) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1029 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1030 | 	struct perf_event_attr *attr = &event->attr; | 
| David S. Miller | 01552f7 | 2009-09-27 20:43:07 -0700 | [diff] [blame] | 1031 | 	struct perf_event *evts[MAX_HWEVENTS]; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1032 | 	struct hw_perf_event *hwc = &event->hw; | 
| David S. Miller | a72a8a5 | 2009-09-28 17:35:20 -0700 | [diff] [blame] | 1033 | 	unsigned long events[MAX_HWEVENTS]; | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1034 | 	int current_idx_dmy[MAX_HWEVENTS]; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1035 | 	const struct perf_event_map *pmap; | 
| David S. Miller | 01552f7 | 2009-09-27 20:43:07 -0700 | [diff] [blame] | 1036 | 	int n; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1037 |  | 
 | 1038 | 	if (atomic_read(&nmi_active) < 0) | 
 | 1039 | 		return -ENODEV; | 
 | 1040 |  | 
| David S. Miller | 2ce4da2 | 2009-09-26 20:42:10 -0700 | [diff] [blame] | 1041 | 	if (attr->type == PERF_TYPE_HARDWARE) { | 
 | 1042 | 		if (attr->config >= sparc_pmu->max_events) | 
 | 1043 | 			return -EINVAL; | 
 | 1044 | 		pmap = sparc_pmu->event_map(attr->config); | 
 | 1045 | 	} else if (attr->type == PERF_TYPE_HW_CACHE) { | 
 | 1046 | 		pmap = sparc_map_cache_event(attr->config); | 
 | 1047 | 		if (IS_ERR(pmap)) | 
 | 1048 | 			return PTR_ERR(pmap); | 
 | 1049 | 	} else | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1050 | 		return -EOPNOTSUPP; | 
 | 1051 |  | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1052 | 	/* We save the enable bits in the config_base.  */ | 
| David S. Miller | 496c07e | 2009-09-10 07:10:59 -0700 | [diff] [blame] | 1053 | 	hwc->config_base = sparc_pmu->irq_bit; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1054 | 	if (!attr->exclude_user) | 
 | 1055 | 		hwc->config_base |= PCR_UTRACE; | 
 | 1056 | 	if (!attr->exclude_kernel) | 
 | 1057 | 		hwc->config_base |= PCR_STRACE; | 
| David S. Miller | 91b9286 | 2009-09-10 07:09:06 -0700 | [diff] [blame] | 1058 | 	if (!attr->exclude_hv) | 
 | 1059 | 		hwc->config_base |= sparc_pmu->hv_bit; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1060 |  | 
| David S. Miller | a72a8a5 | 2009-09-28 17:35:20 -0700 | [diff] [blame] | 1061 | 	hwc->event_base = perf_event_encode(pmap); | 
 | 1062 |  | 
| David S. Miller | 01552f7 | 2009-09-27 20:43:07 -0700 | [diff] [blame] | 1063 | 	n = 0; | 
 | 1064 | 	if (event->group_leader != event) { | 
 | 1065 | 		n = collect_events(event->group_leader, | 
 | 1066 | 				   perf_max_events - 1, | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1067 | 				   evts, events, current_idx_dmy); | 
| David S. Miller | 01552f7 | 2009-09-27 20:43:07 -0700 | [diff] [blame] | 1068 | 		if (n < 0) | 
 | 1069 | 			return -EINVAL; | 
 | 1070 | 	} | 
| David S. Miller | a72a8a5 | 2009-09-28 17:35:20 -0700 | [diff] [blame] | 1071 | 	events[n] = hwc->event_base; | 
| David S. Miller | 01552f7 | 2009-09-27 20:43:07 -0700 | [diff] [blame] | 1072 | 	evts[n] = event; | 
 | 1073 |  | 
 | 1074 | 	if (check_excludes(evts, n, 1)) | 
 | 1075 | 		return -EINVAL; | 
 | 1076 |  | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1077 | 	if (sparc_check_constraints(evts, events, n + 1)) | 
| David S. Miller | a72a8a5 | 2009-09-28 17:35:20 -0700 | [diff] [blame] | 1078 | 		return -EINVAL; | 
 | 1079 |  | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1080 | 	hwc->idx = PIC_NO_INDEX; | 
 | 1081 |  | 
| David S. Miller | 01552f7 | 2009-09-27 20:43:07 -0700 | [diff] [blame] | 1082 | 	/* Try to do all error checking before this point, as unwinding | 
 | 1083 | 	 * state after grabbing the PMC is difficult. | 
 | 1084 | 	 */ | 
 | 1085 | 	perf_event_grab_pmc(); | 
 | 1086 | 	event->destroy = hw_perf_event_destroy; | 
 | 1087 |  | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1088 | 	if (!hwc->sample_period) { | 
 | 1089 | 		hwc->sample_period = MAX_PERIOD; | 
 | 1090 | 		hwc->last_period = hwc->sample_period; | 
 | 1091 | 		atomic64_set(&hwc->period_left, hwc->sample_period); | 
 | 1092 | 	} | 
 | 1093 |  | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1094 | 	return 0; | 
 | 1095 | } | 
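/* Worked example (illustrative; not from the original source): for an
 * event created with exclude_user = 0, exclude_kernel = 0 and
 * exclude_hv = 1, the code above computes
 *
 *	hwc->config_base = sparc_pmu->irq_bit | PCR_UTRACE | PCR_STRACE;
 *
 * i.e. counting is enabled in user and supervisor context while the
 * hypervisor enable bit stays clear.  On chips whose PMU description
 * has no hypervisor trace bit, sparc_pmu->hv_bit is 0 and exclude_hv
 * is effectively a no-op.
 */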
 | 1096 |  | 
| Lin Ming | a13c3af | 2010-04-23 13:56:33 +0800 | [diff] [blame] | 1097 | /* | 
 | 1098 |  * Start group events scheduling transaction | 
 | 1099 |  * Set the flag to make pmu::enable() not perform the | 
 | 1100 |  * schedulability test; it will be performed at commit time. | 
 | 1101 |  */ | 
 | 1102 | static void sparc_pmu_start_txn(const struct pmu *pmu) | 
 | 1103 | { | 
 | 1104 | 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 
 | 1105 |  | 
 | 1106 | 	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED; | 
 | 1107 | } | 
 | 1108 |  | 
 | 1109 | /* | 
 | 1110 |  * Stop group events scheduling transaction | 
 | 1111 |  * Clear the flag so that pmu::enable() will again perform | 
 | 1112 |  * the schedulability test. | 
 | 1113 |  */ | 
 | 1114 | static void sparc_pmu_cancel_txn(const struct pmu *pmu) | 
 | 1115 | { | 
 | 1116 | 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 
 | 1117 |  | 
 | 1118 | 	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED; | 
 | 1119 | } | 
 | 1120 |  | 
 | 1121 | /* | 
 | 1122 |  * Commit group events scheduling transaction | 
 | 1123 |  * Perform the group schedulability test as a whole | 
 | 1124 |  * Return 0 on success. | 
 | 1125 |  */ | 
 | 1126 | static int sparc_pmu_commit_txn(const struct pmu *pmu) | 
 | 1127 | { | 
 | 1128 | 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 
 | 1129 | 	int n; | 
 | 1130 |  | 
 | 1131 | 	if (!sparc_pmu) | 
 | 1132 | 		return -EINVAL; | 
 | 1133 |  | 
 | 1135 | 	n = cpuc->n_events; | 
 | 1136 | 	if (check_excludes(cpuc->event, 0, n)) | 
 | 1137 | 		return -EINVAL; | 
 | 1138 | 	if (sparc_check_constraints(cpuc->event, cpuc->events, n)) | 
 | 1139 | 		return -EAGAIN; | 
 | 1140 |  | 
 | 1141 | 	return 0; | 
 | 1142 | } | 
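/* A minimal sketch of how the perf core of this era drives the three
 * transaction hooks when scheduling an event group.  It is modeled on
 * the core's group scheduling logic but is illustrative only: the
 * function below is hypothetical, not part of this file, and it omits
 * the unwinding of siblings that were already enabled when a later
 * enable fails.
 */
static int example_group_sched_in(struct perf_event *leader,
				  const struct pmu *pmu)
{
	struct perf_event *sub;

	pmu->start_txn(pmu);	/* defer the schedulability test */

	if (pmu->enable(leader))
		goto fail;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (pmu->enable(sub))
			goto fail;
	}

	/* sparc_pmu_commit_txn() runs check_excludes() and
	 * sparc_check_constraints() over the whole group at once.
	 */
	if (!pmu->commit_txn(pmu))
		return 0;
fail:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}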
 | 1143 |  | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1144 | static const struct pmu pmu = { | 
 | 1145 | 	.enable		= sparc_pmu_enable, | 
 | 1146 | 	.disable	= sparc_pmu_disable, | 
 | 1147 | 	.read		= sparc_pmu_read, | 
 | 1148 | 	.unthrottle	= sparc_pmu_unthrottle, | 
| Lin Ming | a13c3af | 2010-04-23 13:56:33 +0800 | [diff] [blame] | 1149 | 	.start_txn	= sparc_pmu_start_txn, | 
 | 1150 | 	.cancel_txn	= sparc_pmu_cancel_txn, | 
 | 1151 | 	.commit_txn	= sparc_pmu_commit_txn, | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1152 | }; | 
 | 1153 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1154 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1155 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1156 | 	int err = __hw_perf_event_init(event); | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1157 |  | 
 | 1158 | 	if (err) | 
 | 1159 | 		return ERR_PTR(err); | 
 | 1160 | 	return &pmu; | 
 | 1161 | } | 
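/* For reference, a user-space sketch (clearly not part of this file)
 * showing how the error paths above surface: __hw_perf_event_init()
 * returning -EOPNOTSUPP or -EINVAL makes perf_event_open(2) fail with
 * the corresponding errno.
 */
#if 0	/* user-space example, excluded from the kernel build */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* leaves PCR_STRACE clear above */

	/* pid 0 = current task, cpu -1 = any, no group fd, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
#endif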
 | 1162 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1163 | void perf_event_print_debug(void) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1164 | { | 
 | 1165 | 	unsigned long flags; | 
 | 1166 | 	u64 pcr, pic; | 
 | 1167 | 	int cpu; | 
 | 1168 |  | 
 | 1169 | 	if (!sparc_pmu) | 
 | 1170 | 		return; | 
 | 1171 |  | 
 | 1172 | 	local_irq_save(flags); | 
 | 1173 |  | 
 | 1174 | 	cpu = smp_processor_id(); | 
 | 1175 |  | 
 | 1176 | 	pcr = pcr_ops->read(); | 
 | 1177 | 	read_pic(pic); | 
 | 1178 |  | 
 | 1179 | 	pr_info("\n"); | 
 | 1180 | 	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", | 
 | 1181 | 		cpu, pcr, pic); | 
 | 1182 |  | 
 | 1183 | 	local_irq_restore(flags); | 
 | 1184 | } | 
 | 1185 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1186 | static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | 
| David S. Miller | d29862f | 2009-09-28 17:37:12 -0700 | [diff] [blame] | 1187 | 					    unsigned long cmd, void *__args) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1188 | { | 
 | 1189 | 	struct die_args *args = __args; | 
 | 1190 | 	struct perf_sample_data data; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1191 | 	struct cpu_hw_events *cpuc; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1192 | 	struct pt_regs *regs; | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1193 | 	int i; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1194 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1195 | 	if (!atomic_read(&active_events)) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1196 | 		return NOTIFY_DONE; | 
 | 1197 |  | 
 | 1198 | 	switch (cmd) { | 
 | 1199 | 	case DIE_NMI: | 
 | 1200 | 		break; | 
 | 1201 |  | 
 | 1202 | 	default: | 
 | 1203 | 		return NOTIFY_DONE; | 
 | 1204 | 	} | 
 | 1205 |  | 
 | 1206 | 	regs = args->regs; | 
 | 1207 |  | 
| Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 1208 | 	perf_sample_data_init(&data, 0); | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1209 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1210 | 	cpuc = &__get_cpu_var(cpu_hw_events); | 
| David S. Miller | e04ed38 | 2010-01-04 23:16:03 -0800 | [diff] [blame] | 1211 |  | 
 | 1212 | 	/* If the PMU has the TOE IRQ enable bits, we need to do a | 
 | 1213 | 	 * dummy write to the %pcr to clear the overflow bits and thus | 
 | 1214 | 	 * the interrupt. | 
 | 1215 | 	 * | 
 | 1216 | 	 * Do this before we peek at the counters to determine | 
 | 1217 | 	 * overflow so we don't lose any events. | 
 | 1218 | 	 */ | 
 | 1219 | 	if (sparc_pmu->irq_bit) | 
 | 1220 | 		pcr_ops->write(cpuc->pcr); | 
 | 1221 |  | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1222 | 	for (i = 0; i < cpuc->n_events; i++) { | 
 | 1223 | 		struct perf_event *event = cpuc->event[i]; | 
 | 1224 | 		int idx = cpuc->current_idx[i]; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1225 | 		struct hw_perf_event *hwc; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1226 | 		u64 val; | 
 | 1227 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1228 | 		hwc = &event->hw; | 
 | 1229 | 		val = sparc_perf_event_update(event, hwc, idx); | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1230 | 		if (val & (1ULL << 31)) | 
 | 1231 | 			continue; | 
 | 1232 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1233 | 		data.period = event->hw.last_period; | 
 | 1234 | 		if (!sparc_perf_event_set_period(event, hwc, idx)) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1235 | 			continue; | 
 | 1236 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1237 | 		if (perf_event_overflow(event, 1, &data, regs)) | 
| David S. Miller | d175138 | 2009-09-29 21:27:06 -0700 | [diff] [blame] | 1238 | 			sparc_pmu_disable_event(cpuc, hwc, idx); | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1239 | 	} | 
 | 1240 |  | 
 | 1241 | 	return NOTIFY_STOP; | 
 | 1242 | } | 
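/* A worked example of the bit-31 test above (illustrative): the
 * counters are 32 bits wide and periods are clamped, elsewhere in this
 * file, to MAX_PERIOD, which is below 2^31, so the counter is always
 * programmed to a start value of at least 0x80000001.  For a period of
 * 0x1000 the counter starts at 0xfffff000; after 0x1000 events it
 * wraps past zero, raises the interrupt, and reads back as a small
 * value with bit 31 clear.  A counter that did not overflow cannot
 * have counted far enough to clear bit 31, so it is skipped here.
 */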
 | 1243 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1244 | static __read_mostly struct notifier_block perf_event_nmi_notifier = { | 
 | 1245 | 	.notifier_call		= perf_event_nmi_handler, | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1246 | }; | 
 | 1247 |  | 
 | 1248 | static bool __init supported_pmu(void) | 
 | 1249 | { | 
| David S. Miller | 28e8f9b | 2009-09-26 20:54:22 -0700 | [diff] [blame] | 1250 | 	if (!strcmp(sparc_pmu_type, "ultra3") || | 
 | 1251 | 	    !strcmp(sparc_pmu_type, "ultra3+") || | 
 | 1252 | 	    !strcmp(sparc_pmu_type, "ultra3i") || | 
 | 1253 | 	    !strcmp(sparc_pmu_type, "ultra4+")) { | 
 | 1254 | 		sparc_pmu = &ultra3_pmu; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1255 | 		return true; | 
 | 1256 | 	} | 
| David S. Miller | 7eebda6 | 2009-09-26 21:23:41 -0700 | [diff] [blame] | 1257 | 	if (!strcmp(sparc_pmu_type, "niagara")) { | 
 | 1258 | 		sparc_pmu = &niagara1_pmu; | 
 | 1259 | 		return true; | 
 | 1260 | 	} | 
| David S. Miller | b73d884 | 2009-09-10 07:22:18 -0700 | [diff] [blame] | 1261 | 	if (!strcmp(sparc_pmu_type, "niagara2")) { | 
 | 1262 | 		sparc_pmu = &niagara2_pmu; | 
 | 1263 | 		return true; | 
 | 1264 | 	} | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1265 | 	return false; | 
 | 1266 | } | 
 | 1267 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1268 | void __init init_hw_perf_events(void) | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1269 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1270 | 	pr_info("Performance events: "); | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1271 |  | 
 | 1272 | 	if (!supported_pmu()) { | 
 | 1273 | 		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); | 
 | 1274 | 		return; | 
 | 1275 | 	} | 
 | 1276 |  | 
 | 1277 | 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); | 
 | 1278 |  | 
| David S. Miller | e7bef6b | 2010-01-20 02:59:47 -0800 | [diff] [blame] | 1279 | 	/* All sparc64 PMUs currently have 2 events.  */ | 
 | 1280 | 	perf_max_events = 2; | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1281 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1282 | 	register_die_notifier(&perf_event_nmi_notifier); | 
| David S. Miller | 59abbd1 | 2009-09-10 06:28:20 -0700 | [diff] [blame] | 1283 | } | 
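/* Example boot output produced by the pr_info()/pr_cont() pair above,
 * e.g. on an UltraSPARC T2 machine:
 *
 *	Performance events: Supported PMU type is 'niagara2'
 *
 * or, for a cpu type supported_pmu() does not recognize:
 *
 *	Performance events: No support for PMU type 'foo'
 */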
| David S. Miller | 4f6dbe4 | 2010-01-19 00:26:13 -0800 | [diff] [blame] | 1284 |  | 
 | 1285 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | 
 | 1286 | { | 
 | 1287 | 	if (entry->nr < PERF_MAX_STACK_DEPTH) | 
 | 1288 | 		entry->ip[entry->nr++] = ip; | 
 | 1289 | } | 
 | 1290 |  | 
 | 1291 | static void perf_callchain_kernel(struct pt_regs *regs, | 
 | 1292 | 				  struct perf_callchain_entry *entry) | 
 | 1293 | { | 
 | 1294 | 	unsigned long ksp, fp; | 
| David S. Miller | 667f0ce | 2010-04-21 03:08:11 -0700 | [diff] [blame] | 1295 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
 | 1296 | 	int graph = 0; | 
 | 1297 | #endif | 
| David S. Miller | 4f6dbe4 | 2010-01-19 00:26:13 -0800 | [diff] [blame] | 1298 |  | 
 | 1299 | 	callchain_store(entry, PERF_CONTEXT_KERNEL); | 
 | 1300 | 	callchain_store(entry, regs->tpc); | 
 | 1301 |  | 
 | 1302 | 	ksp = regs->u_regs[UREG_I6]; | 
 | 1303 | 	fp = ksp + STACK_BIAS; | 
 | 1304 | 	do { | 
 | 1305 | 		struct sparc_stackf *sf; | 
 | 1306 | 		struct pt_regs *regs; | 
 | 1307 | 		unsigned long pc; | 
 | 1308 |  | 
 | 1309 | 		if (!kstack_valid(current_thread_info(), fp)) | 
 | 1310 | 			break; | 
 | 1311 |  | 
 | 1312 | 		sf = (struct sparc_stackf *) fp; | 
 | 1313 | 		regs = (struct pt_regs *) (sf + 1); | 
 | 1314 |  | 
 | 1315 | 		if (kstack_is_trap_frame(current_thread_info(), regs)) { | 
 | 1316 | 			if (user_mode(regs)) | 
 | 1317 | 				break; | 
 | 1318 | 			pc = regs->tpc; | 
 | 1319 | 			fp = regs->u_regs[UREG_I6] + STACK_BIAS; | 
 | 1320 | 		} else { | 
 | 1321 | 			pc = sf->callers_pc; | 
 | 1322 | 			fp = (unsigned long)sf->fp + STACK_BIAS; | 
 | 1323 | 		} | 
 | 1324 | 		callchain_store(entry, pc); | 
| David S. Miller | 667f0ce | 2010-04-21 03:08:11 -0700 | [diff] [blame] | 1325 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
 | 1326 | 		if ((pc + 8UL) == (unsigned long) &return_to_handler) { | 
 | 1327 | 			int index = current->curr_ret_stack; | 
 | 1328 | 			if (current->ret_stack && index >= graph) { | 
 | 1329 | 				pc = current->ret_stack[index - graph].ret; | 
 | 1330 | 				callchain_store(entry, pc); | 
 | 1331 | 				graph++; | 
 | 1332 | 			} | 
 | 1333 | 		} | 
 | 1334 | #endif | 
| David S. Miller | 4f6dbe4 | 2010-01-19 00:26:13 -0800 | [diff] [blame] | 1335 | 	} while (entry->nr < PERF_MAX_STACK_DEPTH); | 
 | 1336 | } | 
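/* A note on the bias arithmetic used by the walker above: in the
 * 64-bit sparc ABI, %sp and %fp hold the true stack address minus a
 * 2047-byte bias (STACK_BIAS), so every frame pointer loaded from a
 * saved frame must have the bias added back before it is dereferenced.
 * For example, a saved %fp of 0x0000080000fff7ff names the frame that
 * actually begins at 0x0000080000fffffe.
 */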
 | 1337 |  | 
 | 1338 | static void perf_callchain_user_64(struct pt_regs *regs, | 
 | 1339 | 				   struct perf_callchain_entry *entry) | 
 | 1340 | { | 
 | 1341 | 	unsigned long ufp; | 
 | 1342 |  | 
 | 1343 | 	callchain_store(entry, PERF_CONTEXT_USER); | 
 | 1344 | 	callchain_store(entry, regs->tpc); | 
 | 1345 |  | 
 | 1346 | 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS; | 
 | 1347 | 	do { | 
 | 1348 | 		struct sparc_stackf *usf, sf; | 
 | 1349 | 		unsigned long pc; | 
 | 1350 |  | 
 | 1351 | 		usf = (struct sparc_stackf *) ufp; | 
 | 1352 | 		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | 
 | 1353 | 			break; | 
 | 1354 |  | 
 | 1355 | 		pc = sf.callers_pc; | 
 | 1356 | 		ufp = (unsigned long)sf.fp + STACK_BIAS; | 
 | 1357 | 		callchain_store(entry, pc); | 
 | 1358 | 	} while (entry->nr < PERF_MAX_STACK_DEPTH); | 
 | 1359 | } | 
 | 1360 |  | 
 | 1361 | static void perf_callchain_user_32(struct pt_regs *regs, | 
 | 1362 | 				   struct perf_callchain_entry *entry) | 
 | 1363 | { | 
 | 1364 | 	unsigned long ufp; | 
 | 1365 |  | 
 | 1366 | 	callchain_store(entry, PERF_CONTEXT_USER); | 
 | 1367 | 	callchain_store(entry, regs->tpc); | 
 | 1368 |  | 
| David S. Miller | 9e8307e | 2010-03-29 13:08:52 -0700 | [diff] [blame] | 1369 | 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; | 
| David S. Miller | 4f6dbe4 | 2010-01-19 00:26:13 -0800 | [diff] [blame] | 1370 | 	do { | 
 | 1371 | 		struct sparc_stackf32 *usf, sf; | 
 | 1372 | 		unsigned long pc; | 
 | 1373 |  | 
 | 1374 | 		usf = (struct sparc_stackf32 *) ufp; | 
 | 1375 | 		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | 
 | 1376 | 			break; | 
 | 1377 |  | 
 | 1378 | 		pc = sf.callers_pc; | 
 | 1379 | 		ufp = (unsigned long)sf.fp; | 
 | 1380 | 		callchain_store(entry, pc); | 
 | 1381 | 	} while (entry->nr < PERF_MAX_STACK_DEPTH); | 
 | 1382 | } | 
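/* The compat walker above differs from perf_callchain_user_64() in two
 * ways: the initial frame pointer is masked down to 32 bits, since
 * 32-bit tasks only use the low 4GB of the address space, and no
 * STACK_BIAS is added, because the 32-bit sparc ABI keeps %sp and %fp
 * unbiased.
 */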
 | 1383 |  | 
 | 1384 | /* As on powerpc, we can't get PMU interrupts within the PMU handler, | 
| Daniel Mack | 3ad2f3f | 2010-02-03 08:01:28 +0800 | [diff] [blame] | 1385 |  * so there is no need for separate NMI and IRQ chains as on x86. | 
| David S. Miller | 4f6dbe4 | 2010-01-19 00:26:13 -0800 | [diff] [blame] | 1386 |  */ | 
 | 1387 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | 
 | 1388 |  | 
 | 1389 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | 
 | 1390 | { | 
 | 1391 | 	struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | 
 | 1392 |  | 
 | 1393 | 	entry->nr = 0; | 
 | 1394 | 	if (!user_mode(regs)) { | 
 | 1395 | 		stack_trace_flush(); | 
 | 1396 | 		perf_callchain_kernel(regs, entry); | 
 | 1397 | 		if (current->mm) | 
 | 1398 | 			regs = task_pt_regs(current); | 
 | 1399 | 		else | 
 | 1400 | 			regs = NULL; | 
 | 1401 | 	} | 
 | 1402 | 	if (regs) { | 
 | 1403 | 		flushw_user(); | 
 | 1404 | 		if (test_thread_flag(TIF_32BIT)) | 
 | 1405 | 			perf_callchain_user_32(regs, entry); | 
 | 1406 | 		else | 
 | 1407 | 			perf_callchain_user_64(regs, entry); | 
 | 1408 | 	} | 
 | 1409 | 	return entry; | 
 | 1410 | } |