/*
 * Performance event support framework for SuperH hardware counters.
 *
 *  Copyright (C) 2009  Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>

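/*
 * Per-CPU bookkeeping: one slot per hardware counter, plus bitmaps
 * tracking which counter indices are allocated.
 */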
struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct sh_pmu *sh_pmu __read_mostly;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Stub these out for now, do something more profound later.
 */
int reserve_pmc_hardware(void)
{
	return 0;
}

void release_pmc_hardware(void)
{
}

static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}

const char *perf_pmu_name(void)
{
	if (!sh_pmu)
		return NULL;

	return sh_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (!sh_pmu)
		return 0;

	return sh_pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

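/*
 * Decode a generalized cache event config into a hardware event code
 * via the PMU's cache map.  The perf ABI packs the selector as
 * (type | (op << 8) | (result << 16)); e.g. an L1D read miss is
 * 0 | (0 << 8) | (1 << 16) == 0x10000.
 */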
static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}

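/*
 * Common setup for a new hardware event: reserve the PMC hardware on
 * first use, and translate the generic attr into a counter config.
 */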
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int err;

	if (!sh_pmu_initialized())
		return -ENODEV;

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * See if we need to reserve the counter.
	 *
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}

	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = attr->config & sh_pmu->raw_event_mask;
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(attr->config, &config);
		if (err)
			return err;
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sh_pmu->max_events)
			return -EINVAL;

		config = sh_pmu->event_map(attr->config);
		break;
	}

	if (config == -1)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

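/*
 * Fold the delta since the last read of a counter into the generic
 * event count.  The shift pair below is inherited from the x86 code,
 * where it sign-extends counters narrower than 64 bits; with shift
 * fixed at 0 here it is a no-op.
 */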
static void sh_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

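/*
 * ->stop() callback: disable the counter, and on PERF_EF_UPDATE fold
 * the final hardware count into the event before marking its state
 * up to date.
 */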
static void sh_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sh_pmu->disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		sh_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

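/*
 * ->start() callback: (re)enable the counter at the index assigned by
 * sh_pmu_add().
 */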
static void sh_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	sh_pmu->enable(hwc, idx);
}

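/*
 * ->del() callback: stop the counter (folding in the final count) and
 * return its slot to the free pool.
 */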
static void sh_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	sh_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

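/*
 * ->add() callback: claim a free counter slot for the event, preferring
 * its previous index if that is still available, and optionally start
 * it counting.  Fails with -EAGAIN if all counters are in use.
 */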
static int sh_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	sh_pmu->disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		sh_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

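/*
 * ->event_init() callback: return -ENOENT for event types this PMU
 * does not handle, so the core can fall back to another PMU; anything
 * else is a real error.
 */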
static int sh_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_HARDWARE:
		err = __hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

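/*
 * ->pmu_enable()/->pmu_disable() callbacks: bracket state changes such
 * as sh_pmu_add() by switching all counters off and back on.
 */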
static void sh_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

static void sh_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= sh_pmu_enable,
	.pmu_disable	= sh_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.add		= sh_pmu_add,
	.del		= sh_pmu_del,
	.start		= sh_pmu_start,
	.stop		= sh_pmu_stop,
	.read		= sh_pmu_read,
};

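/* Reset the per-CPU bookkeeping when a CPU is brought up. */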
static void sh_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}

static int __cpuinit
sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		sh_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

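/*
 * Entry point for the CPU support code: register its counter
 * description with the core.  Only one sh_pmu may be registered at a
 * time.  A caller would look roughly like the sketch below (field
 * values depend on the particular CPU's counter layout):
 *
 *	static struct sh_pmu sh4a_pmu = {
 *		.name		= "sh4a",
 *		.num_events	= 2,
 *		...
 *	};
 *
 *	register_sh_pmu(&sh4a_pmu);
 */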
int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = _pmu;

	pr_info("Performance Events: %s support registered\n", _pmu->name);

	WARN_ON(_pmu->num_events > MAX_HWEVENTS);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(sh_pmu_notifier);
	return 0;
}