/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct *p;
	int (*func)(void *info);
	void *info;
	int ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p = p,
		.func = func,
		.info = info,
		.ret = -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on a specific cpu
 * @cpu:	the CPU on which to run @func
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p = NULL,
		.func = func,
		.info = info,
		.ret = -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

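/*
 * Illustrative sketch (not from the original file; the callback name is
 * hypothetical): callers typically pick between the two helpers above based
 * on whether the event is per-task or per-cpu, e.g.
 *
 *	static int my_callback(void *info)
 *	{
 *		struct perf_event *event = info;
 *		...		// runs on the target CPU with IRQs disabled
 *		return 0;
 *	}
 *
 *	if (task)
 *		err = task_function_call(task, my_callback, event);
 *	else
 *		err = cpu_function_call(event->cpu, my_callback, event);
 */
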
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
atomic_t perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

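/*
 * For illustration only (assuming the usual sysctl wiring in
 * kernel/sysctl.c): the value above is exposed as
 * /proc/sys/kernel/perf_event_paranoid, so an administrator can relax or
 * tighten it at runtime, e.g. "echo 2 > /proc/sys/kernel/perf_event_paranoid".
 */
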
int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}

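/*
 * Worked example (illustrative): with the default of 100000 samples/sec and
 * HZ=1000, max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100, i.e. at
 * most 100 samples per timer tick; writing a new rate through the sysctl
 * above recomputes this in perf_proc_update_handler().
 */
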
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}
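
/*
 * Illustrative note: the helper above folds the wall time elapsed since the
 * last snapshot into info->time, so if the previous timestamp was taken 2ms
 * of perf_clock() ago, info->time grows by 2ms and the timestamp is reset to
 * "now". Cgroup time therefore only advances while the cgroup is actively
 * being accounted on this CPU.
 */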

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {

		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		perf_pmu_disable(cpuctx->ctx.pmu);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				/* set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
		}

		perf_pmu_enable(cpuctx->ctx.pmu);
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

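/*
 * Illustrative call sequence (a sketch, not taken from this file): the
 * scheduler-side hooks are expected to pair these wrappers around a task
 * switch, roughly
 *
 *	perf_cgroup_sched_out(prev);	// PERF_CGROUP_SWOUT
 *	...				// switch to next
 *	perf_cgroup_sched_in(next);	// PERF_CGROUP_SWIN
 *
 * so that cpuctx->cgrp always reflects the cgroup of the task now running on
 * this CPU.
 */
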
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

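/*
 * Illustrative note: the per-cpu pmu_disable_count makes these calls
 * nestable, e.g.
 *
 *	perf_pmu_disable(pmu);		// 0 -> 1, hardware disabled
 *	perf_pmu_disable(pmu);		// 1 -> 2, no-op
 *	perf_pmu_enable(pmu);		// 2 -> 1, no-op
 *	perf_pmu_enable(pmu);		// 1 -> 0, hardware re-enabled
 *
 * so only the outermost disable/enable pair actually touches the PMU.
 */
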
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

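/*
 * Illustrative pairing (sketch): a caller that needs a context to stay alive
 * across a blocking section takes a reference and drops it when done:
 *
 *	get_ctx(ctx);
 *	...			// ctx cannot be freed here
 *	put_ctx(ctx);		// may free ctx and drop parent/task references
 */
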
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

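/*
 * Illustrative pairing (sketch): pinning is used when the context must not be
 * swapped to another task while it is being worked on, e.g.
 *
 *	ctx = perf_pin_task_context(task, ctxn);
 *	if (ctx) {
 *		...				// ctx stays attached to task
 *		perf_unpin_context(ctx);
 *		put_ctx(ctx);			// drop the reference taken above
 *	}
 */
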
/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

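/*
 * Worked example (illustrative): for a group leader with two siblings and
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID |
 * PERF_FORMAT_GROUP, the computation above gives nr = 3, entry = 16 bytes
 * (value + id) and size = 8 (nr) + 8 (time_enabled) + 3 * 16 = 64 bytes,
 * which is the buffer size a read() of the group must provide.
 */
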
static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event))
		ctx->nr_cgroups--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1154 | 	 * Since the task isn't running, it's safe to remove the event; | 
 | 1155 | 	 * holding the ctx->lock ensures the task won't get scheduled in. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1156 | 	 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1157 | 	list_del_event(event, ctx); | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1158 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1159 | } | 
 | 1160 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1161 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1162 |  * Cross CPU call to disable a performance event | 
 | 1163 |  */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1164 | static int __perf_event_disable(void *info) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1165 | { | 
 | 1166 | 	struct perf_event *event = info; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1167 | 	struct perf_event_context *ctx = event->ctx; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1168 | 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1169 |  | 
 | 1170 | 	/* | 
 | 1171 | 	 * If this is a per-task event, need to check whether this | 
 | 1172 | 	 * event's task is the current task on this cpu. | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1173 | 	 * | 
 | 1174 | 	 * Can trigger due to concurrent perf_event_context_sched_out() | 
 | 1175 | 	 * flipping contexts around. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1176 | 	 */ | 
 | 1177 | 	if (ctx->task && cpuctx->task_ctx != ctx) | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1178 | 		return -EINVAL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1179 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1180 | 	raw_spin_lock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1181 |  | 
 | 1182 | 	/* | 
 | 1183 | 	 * If the event is on, turn it off. | 
 | 1184 | 	 * If it is in error state, leave it in error state. | 
 | 1185 | 	 */ | 
 | 1186 | 	if (event->state >= PERF_EVENT_STATE_INACTIVE) { | 
 | 1187 | 		update_context_time(ctx); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1188 | 		update_cgrp_time_from_event(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1189 | 		update_group_times(event); | 
 | 1190 | 		if (event == event->group_leader) | 
 | 1191 | 			group_sched_out(event, cpuctx, ctx); | 
 | 1192 | 		else | 
 | 1193 | 			event_sched_out(event, cpuctx, ctx); | 
 | 1194 | 		event->state = PERF_EVENT_STATE_OFF; | 
 | 1195 | 	} | 
 | 1196 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1197 | 	raw_spin_unlock(&ctx->lock); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1198 |  | 
 | 1199 | 	return 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1200 | } | 
 | 1201 |  | 
 | 1202 | /* | 
 | 1203 |  * Disable an event. | 
 | 1204 |  * | 
 | 1205 |  * If event->ctx is a cloned context, callers must make sure that | 
 | 1206 |  * every task struct that event->ctx->task could possibly point to | 
 | 1207 |  * remains valid.  This condition is satisfied when called through | 
 | 1208 |  * perf_event_for_each_child or perf_event_for_each because they | 
 | 1209 |  * hold the top-level event's child_mutex, so any descendant that | 
 | 1210 |  * goes to exit will block in sync_child_event. | 
 | 1211 |  * When called from perf_pending_event it's OK because event->ctx | 
 | 1212 |  * is the current context on this CPU and preemption is disabled, | 
 | 1213 |  * hence we can't get into perf_event_task_sched_out for this context. | 
 | 1214 |  */ | 
| Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1215 | void perf_event_disable(struct perf_event *event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1216 | { | 
 | 1217 | 	struct perf_event_context *ctx = event->ctx; | 
 | 1218 | 	struct task_struct *task = ctx->task; | 
 | 1219 |  | 
 | 1220 | 	if (!task) { | 
 | 1221 | 		/* | 
 | 1222 | 		 * Disable the event on the cpu that it's on | 
 | 1223 | 		 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1224 | 		cpu_function_call(event->cpu, __perf_event_disable, event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1225 | 		return; | 
 | 1226 | 	} | 
 | 1227 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1228 | retry: | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1229 | 	if (!task_function_call(task, __perf_event_disable, event)) | 
 | 1230 | 		return; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1231 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1232 | 	raw_spin_lock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1233 | 	/* | 
 | 1234 | 	 * If the event is still active, we need to retry the cross-call. | 
 | 1235 | 	 */ | 
 | 1236 | 	if (event->state == PERF_EVENT_STATE_ACTIVE) { | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1237 | 		raw_spin_unlock_irq(&ctx->lock); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1238 | 		/* | 
 | 1239 | 		 * Reload the task pointer, it might have been changed by | 
 | 1240 | 		 * a concurrent perf_event_context_sched_out(). | 
 | 1241 | 		 */ | 
 | 1242 | 		task = ctx->task; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1243 | 		goto retry; | 
 | 1244 | 	} | 
 | 1245 |  | 
 | 1246 | 	/* | 
 | 1247 | 	 * Since we have the lock this context can't be scheduled | 
 | 1248 | 	 * in, so we can change the state safely. | 
 | 1249 | 	 */ | 
 | 1250 | 	if (event->state == PERF_EVENT_STATE_INACTIVE) { | 
 | 1251 | 		update_group_times(event); | 
 | 1252 | 		event->state = PERF_EVENT_STATE_OFF; | 
 | 1253 | 	} | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1254 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1255 | } | 
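/*
 * Userspace-side illustration (editor's addition, not part of this file):
 * perf_event_disable() above -- and perf_event_enable() further down --
 * back the PERF_EVENT_IOC_DISABLE and PERF_EVENT_IOC_ENABLE ioctls.  A
 * rough self-monitoring sketch; error handling is omitted and the event
 * type chosen here is arbitrary.
 */
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;				/* start disabled */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* measure self */

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... region of interest ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));	/* read the accumulated count */
	close(fd);
	return 0;
}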
 | 1256 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1257 | static void perf_set_shadow_time(struct perf_event *event, | 
 | 1258 | 				 struct perf_event_context *ctx, | 
 | 1259 | 				 u64 tstamp) | 
 | 1260 | { | 
 | 1261 | 	/* | 
 | 1262 | 	 * use the correct time source for the time snapshot | 
 | 1263 | 	 * | 
 | 1264 | 	 * We could get by without this by leveraging the | 
 | 1265 | 	 * fact that to get to this function, the caller | 
 | 1266 | 	 * has most likely already called update_context_time() | 
 | 1267 | 	 * and update_cgrp_time_xx() and thus both timestamps | 
 | 1268 | 	 * are identical (or very close). Given that tstamp is | 
 | 1269 | 	 * already adjusted for cgroup, we could say that: | 
 | 1270 | 	 *    tstamp - ctx->timestamp | 
 | 1271 | 	 * is equivalent to | 
 | 1272 | 	 *    tstamp - cgrp->timestamp. | 
 | 1273 | 	 * | 
 | 1274 | 	 * Then, in perf_output_read(), the calculation would | 
 | 1275 | 	 * work with no changes because: | 
 | 1276 | 	 * - event is guaranteed scheduled in | 
 | 1277 | 	 * - not scheduled out in between | 
 | 1278 | 	 * - thus the timestamp would be the same | 
 | 1279 | 	 * | 
 | 1280 | 	 * But this is a bit hairy. | 
 | 1281 | 	 * | 
 | 1282 | 	 * So instead, we have an explicit cgroup call to remain | 
 | 1283 | 	 * within the same time source all along. We believe it | 
 | 1284 | 	 * is cleaner and simpler to understand. | 
 | 1285 | 	 */ | 
 | 1286 | 	if (is_cgroup_event(event)) | 
 | 1287 | 		perf_cgroup_set_shadow_time(event, tstamp); | 
 | 1288 | 	else | 
 | 1289 | 		event->shadow_ctx_time = tstamp - ctx->timestamp; | 
 | 1290 | } | 
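/*
 * Worked example (editor's illustration; the numbers are made up):
 * suppose update_context_time() last ran when perf_clock() read 1000,
 * at which point the accumulated context time ctx->time was 400.  A
 * non-cgroup event scheduled in at that moment gets
 *
 *	shadow_ctx_time = tstamp - ctx->timestamp = 400 - 1000 = -600
 *
 * (stored in a u64, so the subtraction simply wraps; the later addition
 * undoes the wrap).  Later, say at perf_clock() == 1250, a reader such
 * as perf_output_read() can recover an up-to-date context time as
 *
 *	shadow_ctx_time + perf_clock() = -600 + 1250 = 650
 *
 * which matches ctx->time (400) plus the 250 that elapsed since, without
 * having to take ctx->lock from NMI context.
 */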
 | 1291 |  | 
| Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 1292 | #define MAX_INTERRUPTS (~0ULL) | 
 | 1293 |  | 
 | 1294 | static void perf_log_throttle(struct perf_event *event, int enable); | 
 | 1295 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1296 | static int | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1297 | event_sched_in(struct perf_event *event, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1298 | 		 struct perf_cpu_context *cpuctx, | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1299 | 		 struct perf_event_context *ctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1300 | { | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1301 | 	u64 tstamp = perf_event_time(event); | 
 | 1302 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1303 | 	if (event->state <= PERF_EVENT_STATE_OFF) | 
 | 1304 | 		return 0; | 
 | 1305 |  | 
 | 1306 | 	event->state = PERF_EVENT_STATE_ACTIVE; | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1307 | 	event->oncpu = smp_processor_id(); | 
| Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 1308 |  | 
 | 1309 | 	/* | 
 | 1310 | 	 * Unthrottle events: since we just scheduled, we might have missed | 
 | 1311 | 	 * several ticks already, and for a heavily scheduling task there is | 
 | 1312 | 	 * little guarantee it'll get a tick in a timely manner. | 
 | 1313 | 	 */ | 
 | 1314 | 	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { | 
 | 1315 | 		perf_log_throttle(event, 1); | 
 | 1316 | 		event->hw.interrupts = 0; | 
 | 1317 | 	} | 
 | 1318 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1319 | 	/* | 
 | 1320 | 	 * The new state must be visible before we turn it on in the hardware: | 
 | 1321 | 	 */ | 
 | 1322 | 	smp_wmb(); | 
 | 1323 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 1324 | 	if (event->pmu->add(event, PERF_EF_START)) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1325 | 		event->state = PERF_EVENT_STATE_INACTIVE; | 
 | 1326 | 		event->oncpu = -1; | 
 | 1327 | 		return -EAGAIN; | 
 | 1328 | 	} | 
 | 1329 |  | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1330 | 	event->tstamp_running += tstamp - event->tstamp_stopped; | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1331 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1332 | 	perf_set_shadow_time(event, ctx, tstamp); | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 1333 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1334 | 	if (!is_software_event(event)) | 
 | 1335 | 		cpuctx->active_oncpu++; | 
 | 1336 | 	ctx->nr_active++; | 
 | 1337 |  | 
 | 1338 | 	if (event->attr.exclusive) | 
 | 1339 | 		cpuctx->exclusive = 1; | 
 | 1340 |  | 
 | 1341 | 	return 0; | 
 | 1342 | } | 
 | 1343 |  | 
 | 1344 | static int | 
 | 1345 | group_sched_in(struct perf_event *group_event, | 
 | 1346 | 	       struct perf_cpu_context *cpuctx, | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1347 | 	       struct perf_event_context *ctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1348 | { | 
| Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 1349 | 	struct perf_event *event, *partial_group = NULL; | 
| Peter Zijlstra | 51b0fe3 | 2010-06-11 13:35:57 +0200 | [diff] [blame] | 1350 | 	struct pmu *pmu = group_event->pmu; | 
| Stephane Eranian | d7842da | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1351 | 	u64 now = ctx->time; | 
 | 1352 | 	bool simulate = false; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1353 |  | 
 | 1354 | 	if (group_event->state == PERF_EVENT_STATE_OFF) | 
 | 1355 | 		return 0; | 
 | 1356 |  | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 1357 | 	pmu->start_txn(pmu); | 
| Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 1358 |  | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1359 | 	if (event_sched_in(group_event, cpuctx, ctx)) { | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 1360 | 		pmu->cancel_txn(pmu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1361 | 		return -EAGAIN; | 
| Stephane Eranian | 90151c3 | 2010-05-25 16:23:10 +0200 | [diff] [blame] | 1362 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1363 |  | 
 | 1364 | 	/* | 
 | 1365 | 	 * Schedule in siblings as one group (if any): | 
 | 1366 | 	 */ | 
 | 1367 | 	list_for_each_entry(event, &group_event->sibling_list, group_entry) { | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1368 | 		if (event_sched_in(event, cpuctx, ctx)) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1369 | 			partial_group = event; | 
 | 1370 | 			goto group_error; | 
 | 1371 | 		} | 
 | 1372 | 	} | 
 | 1373 |  | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1374 | 	if (!pmu->commit_txn(pmu)) | 
| Paul Mackerras | 6e85158 | 2010-05-08 20:58:00 +1000 | [diff] [blame] | 1375 | 		return 0; | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1376 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1377 | group_error: | 
 | 1378 | 	/* | 
 | 1379 | 	 * Groups can be scheduled in as one unit only, so undo any | 
 | 1380 | 	 * partial group before returning: | 
| Stephane Eranian | d7842da | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1381 | 	 * The events up to the failed event are scheduled out normally, | 
 | 1382 | 	 * tstamp_stopped will be updated. | 
 | 1383 | 	 * | 
 | 1384 | 	 * The failed events and the remaining siblings need to have | 
 | 1385 | 	 * their timings updated as if they had gone through event_sched_in() | 
 | 1386 | 	 * and event_sched_out(). This is required to get consistent timings | 
 | 1387 | 	 * across the group. This also takes care of the case where the group | 
 | 1388 | 	 * could never be scheduled by ensuring tstamp_stopped is set to mark | 
 | 1389 | 	 * the time the event was actually stopped, such that time delta | 
 | 1390 | 	 * calculation in update_event_times() is correct. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1391 | 	 */ | 
 | 1392 | 	list_for_each_entry(event, &group_event->sibling_list, group_entry) { | 
 | 1393 | 		if (event == partial_group) | 
| Stephane Eranian | d7842da | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1394 | 			simulate = true; | 
 | 1395 |  | 
 | 1396 | 		if (simulate) { | 
 | 1397 | 			event->tstamp_running += now - event->tstamp_stopped; | 
 | 1398 | 			event->tstamp_stopped = now; | 
 | 1399 | 		} else { | 
 | 1400 | 			event_sched_out(event, cpuctx, ctx); | 
 | 1401 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1402 | 	} | 
| Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 1403 | 	event_sched_out(group_event, cpuctx, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1404 |  | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 1405 | 	pmu->cancel_txn(pmu); | 
| Stephane Eranian | 90151c3 | 2010-05-25 16:23:10 +0200 | [diff] [blame] | 1406 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1407 | 	return -EAGAIN; | 
 | 1408 | } | 
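/*
 * Illustrative sketch (editor's addition, not part of this file): a
 * hypothetical PMU backend showing what the transaction hooks driven by
 * group_sched_in() above are typically expected to do.  ->start_txn()
 * opens a batch, ->add() may then optimistically accept each group
 * member, and ->commit_txn() is where the whole batch is checked against
 * the hardware constraints; on failure the caller schedules the group
 * back out and calls ->cancel_txn().  All toy_* names are made up, only
 * the transaction hooks and ->add() are shown (a real pmu also implements
 * ->event_init(), ->del(), ->start(), ->stop() and ->read()), and the
 * simple "count the counters" check stands in for a real constraint
 * scheduler.
 */
struct toy_pmu_cpu {
	int	n_counters;	/* hardware counters on this CPU */
	int	n_used;		/* counters owned by committed groups */
	int	n_pending;	/* counters requested since start_txn */
};

static DEFINE_PER_CPU(struct toy_pmu_cpu, toy_pmu_cpu);

static void toy_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);		/* no PMU activity while batching */
	__get_cpu_var(toy_pmu_cpu).n_pending = 0;
}

static int toy_pmu_add(struct perf_event *event, int flags)
{
	/* inside a transaction, just note the request; check at commit */
	__get_cpu_var(toy_pmu_cpu).n_pending++;
	return 0;
}

static int toy_pmu_commit_txn(struct pmu *pmu)
{
	struct toy_pmu_cpu *c = &__get_cpu_var(toy_pmu_cpu);

	if (c->n_used + c->n_pending > c->n_counters)
		return -EAGAIN;		/* reject the group as a whole */

	c->n_used += c->n_pending;
	c->n_pending = 0;
	perf_pmu_enable(pmu);		/* only the success path re-enables */
	return 0;
}

static void toy_pmu_cancel_txn(struct pmu *pmu)
{
	__get_cpu_var(toy_pmu_cpu).n_pending = 0;
	perf_pmu_enable(pmu);		/* undo the disable from start_txn */
}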
 | 1409 |  | 
 | 1410 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1411 |  * Work out whether we can put this event group on the CPU now. | 
 | 1412 |  */ | 
 | 1413 | static int group_can_go_on(struct perf_event *event, | 
 | 1414 | 			   struct perf_cpu_context *cpuctx, | 
 | 1415 | 			   int can_add_hw) | 
 | 1416 | { | 
 | 1417 | 	/* | 
 | 1418 | 	 * Groups consisting entirely of software events can always go on. | 
 | 1419 | 	 */ | 
| Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 1420 | 	if (event->group_flags & PERF_GROUP_SOFTWARE) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1421 | 		return 1; | 
 | 1422 | 	/* | 
 | 1423 | 	 * If an exclusive group is already on, no other hardware | 
 | 1424 | 	 * events can go on. | 
 | 1425 | 	 */ | 
 | 1426 | 	if (cpuctx->exclusive) | 
 | 1427 | 		return 0; | 
 | 1428 | 	/* | 
 | 1429 | 	 * If this group is exclusive and there are already | 
 | 1430 | 	 * events on the CPU, it can't go on. | 
 | 1431 | 	 */ | 
 | 1432 | 	if (event->attr.exclusive && cpuctx->active_oncpu) | 
 | 1433 | 		return 0; | 
 | 1434 | 	/* | 
 | 1435 | 	 * Otherwise, try to add it if all previous groups were able | 
 | 1436 | 	 * to go on. | 
 | 1437 | 	 */ | 
 | 1438 | 	return can_add_hw; | 
 | 1439 | } | 
 | 1440 |  | 
 | 1441 | static void add_event_to_ctx(struct perf_event *event, | 
 | 1442 | 			       struct perf_event_context *ctx) | 
 | 1443 | { | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1444 | 	u64 tstamp = perf_event_time(event); | 
 | 1445 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1446 | 	list_add_event(event, ctx); | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1447 | 	perf_group_attach(event); | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1448 | 	event->tstamp_enabled = tstamp; | 
 | 1449 | 	event->tstamp_running = tstamp; | 
 | 1450 | 	event->tstamp_stopped = tstamp; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1451 | } | 
 | 1452 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1453 | static void perf_event_context_sched_in(struct perf_event_context *ctx, | 
 | 1454 | 					struct task_struct *tsk); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1455 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1456 | /* | 
 | 1457 |  * Cross CPU call to install and enable a performance event | 
 | 1458 |  * | 
 | 1459 |  * Must be called with ctx->mutex held | 
 | 1460 |  */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1461 | static int __perf_install_in_context(void *info) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1462 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1463 | 	struct perf_event *event = info; | 
 | 1464 | 	struct perf_event_context *ctx = event->ctx; | 
 | 1465 | 	struct perf_event *leader = event->group_leader; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1466 | 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1467 | 	int err; | 
 | 1468 |  | 
 | 1469 | 	/* | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1470 | 	 * We may be installing a new context on an already running task; this | 
 | 1471 | 	 * can also happen before perf_event_task_sched_in() on architectures | 
 | 1472 | 	 * which do context switches with IRQs enabled. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1473 | 	 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1474 | 	if (ctx->task && !cpuctx->task_ctx) | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1475 | 		perf_event_context_sched_in(ctx, ctx->task); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1476 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1477 | 	raw_spin_lock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1478 | 	ctx->is_active = 1; | 
 | 1479 | 	update_context_time(ctx); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1480 | 	/* | 
 | 1481 | 	 * update cgrp time only if current cgrp | 
 | 1482 | 	 * matches event->cgrp. Must be done before | 
 | 1483 | 	 * calling add_event_to_ctx() | 
 | 1484 | 	 */ | 
 | 1485 | 	update_cgrp_time_from_event(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1486 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1487 | 	add_event_to_ctx(event, ctx); | 
 | 1488 |  | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1489 | 	if (!event_filter_match(event)) | 
| Peter Zijlstra | f4c4176 | 2009-12-16 17:55:54 +0100 | [diff] [blame] | 1490 | 		goto unlock; | 
 | 1491 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1492 | 	/* | 
 | 1493 | 	 * Don't put the event on if it is disabled or if | 
 | 1494 | 	 * it is in a group and the group isn't on. | 
 | 1495 | 	 */ | 
 | 1496 | 	if (event->state != PERF_EVENT_STATE_INACTIVE || | 
 | 1497 | 	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) | 
 | 1498 | 		goto unlock; | 
 | 1499 |  | 
 | 1500 | 	/* | 
 | 1501 | 	 * An exclusive event can't go on if there are already active | 
 | 1502 | 	 * hardware events, and no hardware event can go on if there | 
 | 1503 | 	 * is already an exclusive event on. | 
 | 1504 | 	 */ | 
 | 1505 | 	if (!group_can_go_on(event, cpuctx, 1)) | 
 | 1506 | 		err = -EEXIST; | 
 | 1507 | 	else | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1508 | 		err = event_sched_in(event, cpuctx, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1509 |  | 
 | 1510 | 	if (err) { | 
 | 1511 | 		/* | 
 | 1512 | 		 * This event couldn't go on.  If it is in a group | 
 | 1513 | 		 * then we have to pull the whole group off. | 
 | 1514 | 		 * If the event group is pinned then put it in error state. | 
 | 1515 | 		 */ | 
 | 1516 | 		if (leader != event) | 
 | 1517 | 			group_sched_out(leader, cpuctx, ctx); | 
 | 1518 | 		if (leader->attr.pinned) { | 
 | 1519 | 			update_group_times(leader); | 
 | 1520 | 			leader->state = PERF_EVENT_STATE_ERROR; | 
 | 1521 | 		} | 
 | 1522 | 	} | 
 | 1523 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1524 | unlock: | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1525 | 	raw_spin_unlock(&ctx->lock); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1526 |  | 
 | 1527 | 	return 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1528 | } | 
 | 1529 |  | 
 | 1530 | /* | 
 | 1531 |  * Attach a performance event to a context | 
 | 1532 |  * | 
 | 1533 |  * First we add the event to the list with the hardware enable bit | 
 | 1534 |  * in event->hw_config cleared. | 
 | 1535 |  * | 
 | 1536 |  * If the event is attached to a task which is on a CPU we use a smp | 
 | 1537 |  * call to enable it in the task context. The task might have been | 
 | 1538 |  * scheduled away, but we check this in the smp call again. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1539 |  */ | 
 | 1540 | static void | 
 | 1541 | perf_install_in_context(struct perf_event_context *ctx, | 
 | 1542 | 			struct perf_event *event, | 
 | 1543 | 			int cpu) | 
 | 1544 | { | 
 | 1545 | 	struct task_struct *task = ctx->task; | 
 | 1546 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1547 | 	lockdep_assert_held(&ctx->mutex); | 
 | 1548 |  | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 1549 | 	event->ctx = ctx; | 
 | 1550 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1551 | 	if (!task) { | 
 | 1552 | 		/* | 
 | 1553 | 		 * Per cpu events are installed via an smp call and | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 1554 | 		 * the install is always successful. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1555 | 		 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1556 | 		cpu_function_call(cpu, __perf_install_in_context, event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1557 | 		return; | 
 | 1558 | 	} | 
 | 1559 |  | 
 | 1560 | retry: | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1561 | 	if (!task_function_call(task, __perf_install_in_context, event)) | 
 | 1562 | 		return; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1563 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1564 | 	raw_spin_lock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1565 | 	/* | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1566 | 	 * If we failed to find a running task, but find the context active now | 
 | 1567 | 	 * that we've acquired the ctx->lock, retry. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1568 | 	 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1569 | 	if (ctx->is_active) { | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1570 | 		raw_spin_unlock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1571 | 		goto retry; | 
 | 1572 | 	} | 
 | 1573 |  | 
 | 1574 | 	/* | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1575 | 	 * Since the task isn't running, it's safe to add the event; holding | 
 | 1576 | 	 * the ctx->lock ensures the task won't get scheduled in. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1577 | 	 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1578 | 	add_event_to_ctx(event, ctx); | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1579 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1580 | } | 
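/*
 * Editor's note on the calling convention (an approximation, not quoted
 * from this file): callers such as the perf_event_open() syscall path are
 * expected to hold ctx->mutex across the install, which is what the
 * lockdep assertion above checks.  Roughly:
 *
 *	mutex_lock(&ctx->mutex);
 *	perf_install_in_context(ctx, event, cpu);
 *	mutex_unlock(&ctx->mutex);
 */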
 | 1581 |  | 
 | 1582 | /* | 
 | 1583 |  * Put an event into inactive state and update time fields. | 
 | 1584 |  * Enabling the leader of a group effectively enables all | 
 | 1585 |  * the group members that aren't explicitly disabled, so we | 
 | 1586 |  * have to update their ->tstamp_enabled also. | 
 | 1587 |  * Note: this works for group members as well as group leaders | 
 | 1588 |  * since the non-leader members' sibling_lists will be empty. | 
 | 1589 |  */ | 
 | 1590 | static void __perf_event_mark_enabled(struct perf_event *event, | 
 | 1591 | 					struct perf_event_context *ctx) | 
 | 1592 | { | 
 | 1593 | 	struct perf_event *sub; | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1594 | 	u64 tstamp = perf_event_time(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1595 |  | 
 | 1596 | 	event->state = PERF_EVENT_STATE_INACTIVE; | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1597 | 	event->tstamp_enabled = tstamp - event->total_time_enabled; | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1598 | 	list_for_each_entry(sub, &event->sibling_list, group_entry) { | 
| Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1599 | 		if (sub->state >= PERF_EVENT_STATE_INACTIVE) | 
 | 1600 | 			sub->tstamp_enabled = tstamp - sub->total_time_enabled; | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1601 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1602 | } | 
 | 1603 |  | 
 | 1604 | /* | 
 | 1605 |  * Cross CPU call to enable a performance event | 
 | 1606 |  */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1607 | static int __perf_event_enable(void *info) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1608 | { | 
 | 1609 | 	struct perf_event *event = info; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1610 | 	struct perf_event_context *ctx = event->ctx; | 
 | 1611 | 	struct perf_event *leader = event->group_leader; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1612 | 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1613 | 	int err; | 
 | 1614 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1615 | 	if (WARN_ON_ONCE(!ctx->is_active)) | 
 | 1616 | 		return -EINVAL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1617 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1618 | 	raw_spin_lock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1619 | 	update_context_time(ctx); | 
 | 1620 |  | 
 | 1621 | 	if (event->state >= PERF_EVENT_STATE_INACTIVE) | 
 | 1622 | 		goto unlock; | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1623 |  | 
 | 1624 | 	/* | 
 | 1625 | 	 * set current task's cgroup time reference point | 
 | 1626 | 	 */ | 
| Stephane Eranian | 3f7cce3 | 2011-02-18 14:40:01 +0200 | [diff] [blame] | 1627 | 	perf_cgroup_set_timestamp(current, ctx); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1628 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1629 | 	__perf_event_mark_enabled(event, ctx); | 
 | 1630 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1631 | 	if (!event_filter_match(event)) { | 
 | 1632 | 		if (is_cgroup_event(event)) | 
 | 1633 | 			perf_cgroup_defer_enabled(event); | 
| Peter Zijlstra | f4c4176 | 2009-12-16 17:55:54 +0100 | [diff] [blame] | 1634 | 		goto unlock; | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1635 | 	} | 
| Peter Zijlstra | f4c4176 | 2009-12-16 17:55:54 +0100 | [diff] [blame] | 1636 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1637 | 	/* | 
 | 1638 | 	 * If the event is in a group and isn't the group leader, | 
 | 1639 | 	 * then don't put it on unless the group is on. | 
 | 1640 | 	 */ | 
 | 1641 | 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) | 
 | 1642 | 		goto unlock; | 
 | 1643 |  | 
 | 1644 | 	if (!group_can_go_on(event, cpuctx, 1)) { | 
 | 1645 | 		err = -EEXIST; | 
 | 1646 | 	} else { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1647 | 		if (event == leader) | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1648 | 			err = group_sched_in(event, cpuctx, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1649 | 		else | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1650 | 			err = event_sched_in(event, cpuctx, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1651 | 	} | 
 | 1652 |  | 
 | 1653 | 	if (err) { | 
 | 1654 | 		/* | 
 | 1655 | 		 * If this event can't go on and it's part of a | 
 | 1656 | 		 * group, then the whole group has to come off. | 
 | 1657 | 		 */ | 
 | 1658 | 		if (leader != event) | 
 | 1659 | 			group_sched_out(leader, cpuctx, ctx); | 
 | 1660 | 		if (leader->attr.pinned) { | 
 | 1661 | 			update_group_times(leader); | 
 | 1662 | 			leader->state = PERF_EVENT_STATE_ERROR; | 
 | 1663 | 		} | 
 | 1664 | 	} | 
 | 1665 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1666 | unlock: | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1667 | 	raw_spin_unlock(&ctx->lock); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1668 |  | 
 | 1669 | 	return 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1670 | } | 
 | 1671 |  | 
 | 1672 | /* | 
 | 1673 |  * Enable an event. | 
 | 1674 |  * | 
 | 1675 |  * If event->ctx is a cloned context, callers must make sure that | 
 | 1676 |  * every task struct that event->ctx->task could possibly point to | 
 | 1677 |  * remains valid.  This condition is satisfied when called through | 
 | 1678 |  * perf_event_for_each_child or perf_event_for_each as described | 
 | 1679 |  * for perf_event_disable. | 
 | 1680 |  */ | 
| Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1681 | void perf_event_enable(struct perf_event *event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1682 | { | 
 | 1683 | 	struct perf_event_context *ctx = event->ctx; | 
 | 1684 | 	struct task_struct *task = ctx->task; | 
 | 1685 |  | 
 | 1686 | 	if (!task) { | 
 | 1687 | 		/* | 
 | 1688 | 		 * Enable the event on the cpu that it's on | 
 | 1689 | 		 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1690 | 		cpu_function_call(event->cpu, __perf_event_enable, event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1691 | 		return; | 
 | 1692 | 	} | 
 | 1693 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1694 | 	raw_spin_lock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1695 | 	if (event->state >= PERF_EVENT_STATE_INACTIVE) | 
 | 1696 | 		goto out; | 
 | 1697 |  | 
 | 1698 | 	/* | 
 | 1699 | 	 * If the event is in error state, clear that first. | 
 | 1700 | 	 * That way, if we see the event in error state below, we | 
 | 1701 | 	 * know that it has gone back into error state, as distinct | 
 | 1702 | 	 * from the task having been scheduled away before the | 
 | 1703 | 	 * cross-call arrived. | 
 | 1704 | 	 */ | 
 | 1705 | 	if (event->state == PERF_EVENT_STATE_ERROR) | 
 | 1706 | 		event->state = PERF_EVENT_STATE_OFF; | 
 | 1707 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1708 | retry: | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1709 | 	if (!ctx->is_active) { | 
 | 1710 | 		__perf_event_mark_enabled(event, ctx); | 
 | 1711 | 		goto out; | 
 | 1712 | 	} | 
 | 1713 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1714 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1715 |  | 
 | 1716 | 	if (!task_function_call(task, __perf_event_enable, event)) | 
 | 1717 | 		return; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1718 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1719 | 	raw_spin_lock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1720 |  | 
 | 1721 | 	/* | 
 | 1722 | 	 * If the context is active and the event is still off, | 
 | 1723 | 	 * we need to retry the cross-call. | 
 | 1724 | 	 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1725 | 	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { | 
 | 1726 | 		/* | 
 | 1727 | 		 * task could have been flipped by a concurrent | 
 | 1728 | 		 * perf_event_context_sched_out() | 
 | 1729 | 		 */ | 
 | 1730 | 		task = ctx->task; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1731 | 		goto retry; | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1732 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1733 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1734 | out: | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1735 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1736 | } | 
 | 1737 |  | 
 | 1738 | static int perf_event_refresh(struct perf_event *event, int refresh) | 
 | 1739 | { | 
 | 1740 | 	/* | 
 | 1741 | 	 * not supported on inherited events | 
 | 1742 | 	 */ | 
| Franck Bui-Huu | 2e939d1 | 2010-11-23 16:21:44 +0100 | [diff] [blame] | 1743 | 	if (event->attr.inherit || !is_sampling_event(event)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1744 | 		return -EINVAL; | 
 | 1745 |  | 
 | 1746 | 	atomic_add(refresh, &event->event_limit); | 
 | 1747 | 	perf_event_enable(event); | 
 | 1748 |  | 
 | 1749 | 	return 0; | 
 | 1750 | } | 
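/*
 * Userspace-side illustration (editor's addition, not part of this file):
 * perf_event_refresh() above backs the PERF_EVENT_IOC_REFRESH ioctl,
 * which arms a sampling event for a given number of overflows; once that
 * budget is used up the kernel disables the event again.  Rough sketch
 * only -- error handling is omitted and the sample period is arbitrary.
 */
#include <signal.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static void on_overflow(int sig)
{
	/* each overflow consumes one unit of the REFRESH budget */
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.sample_period = 1000000;	/* sampling event, required by refresh */
	attr.disabled = 1;		/* REFRESH will enable it */

	fd = perf_event_open(&attr, 0, -1, -1, 0);

	signal(SIGIO, on_overflow);
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	/* allow exactly one overflow, then the event turns itself off */
	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);

	/* ... monitored work runs here ... */

	close(fd);
	return 0;
}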
 | 1751 |  | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1752 | static void ctx_sched_out(struct perf_event_context *ctx, | 
 | 1753 | 			  struct perf_cpu_context *cpuctx, | 
 | 1754 | 			  enum event_type_t event_type) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1755 | { | 
 | 1756 | 	struct perf_event *event; | 
 | 1757 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1758 | 	raw_spin_lock(&ctx->lock); | 
| Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 1759 | 	perf_pmu_disable(ctx->pmu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1760 | 	ctx->is_active = 0; | 
 | 1761 | 	if (likely(!ctx->nr_events)) | 
 | 1762 | 		goto out; | 
 | 1763 | 	update_context_time(ctx); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1764 | 	update_cgrp_time_from_cpuctx(cpuctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1765 |  | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1766 | 	if (!ctx->nr_active) | 
| Peter Zijlstra | 24cd7f5 | 2010-06-11 17:32:03 +0200 | [diff] [blame] | 1767 | 		goto out; | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1768 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1769 | 	if (event_type & EVENT_PINNED) { | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1770 | 		list_for_each_entry(event, &ctx->pinned_groups, group_entry) | 
 | 1771 | 			group_sched_out(event, cpuctx, ctx); | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1772 | 	} | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1773 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1774 | 	if (event_type & EVENT_FLEXIBLE) { | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1775 | 		list_for_each_entry(event, &ctx->flexible_groups, group_entry) | 
| Xiao Guangrong | 8c9ed8e | 2009-09-25 13:51:17 +0800 | [diff] [blame] | 1776 | 			group_sched_out(event, cpuctx, ctx); | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1777 | 	} | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1778 | out: | 
| Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 1779 | 	perf_pmu_enable(ctx->pmu); | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1780 | 	raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1781 | } | 
 | 1782 |  | 
 | 1783 | /* | 
 | 1784 |  * Test whether two contexts are equivalent, i.e. whether they | 
 | 1785 |  * have both been cloned from the same version of the same context | 
 | 1786 |  * and they both have the same number of enabled events. | 
 | 1787 |  * If the number of enabled events is the same, then the set | 
 | 1788 |  * of enabled events should be the same, because these are both | 
 | 1789 |  * inherited contexts, therefore we can't access individual events | 
 | 1790 |  * in them directly with an fd; we can only enable/disable all | 
 | 1791 |  * events via prctl, or enable/disable all events in a family | 
 | 1792 |  * via ioctl, which will have the same effect on both contexts. | 
 | 1793 |  */ | 
 | 1794 | static int context_equiv(struct perf_event_context *ctx1, | 
 | 1795 | 			 struct perf_event_context *ctx2) | 
 | 1796 | { | 
 | 1797 | 	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx | 
 | 1798 | 		&& ctx1->parent_gen == ctx2->parent_gen | 
 | 1799 | 		&& !ctx1->pin_count && !ctx2->pin_count; | 
 | 1800 | } | 
 | 1801 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1802 | static void __perf_event_sync_stat(struct perf_event *event, | 
 | 1803 | 				     struct perf_event *next_event) | 
 | 1804 | { | 
 | 1805 | 	u64 value; | 
 | 1806 |  | 
 | 1807 | 	if (!event->attr.inherit_stat) | 
 | 1808 | 		return; | 
 | 1809 |  | 
 | 1810 | 	/* | 
 | 1811 | 	 * Update the event value; we cannot use perf_event_read() | 
 | 1812 | 	 * because we're in the middle of a context switch and have IRQs | 
 | 1813 | 	 * disabled, which upsets smp_call_function_single(). However, | 
 | 1814 | 	 * we know the event must be on the current CPU, so we | 
 | 1815 | 	 * don't need to use it. | 
 | 1816 | 	 */ | 
 | 1817 | 	switch (event->state) { | 
 | 1818 | 	case PERF_EVENT_STATE_ACTIVE: | 
| Peter Zijlstra | 3dbebf1 | 2009-11-20 22:19:52 +0100 | [diff] [blame] | 1819 | 		event->pmu->read(event); | 
 | 1820 | 		/* fall-through */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1821 |  | 
 | 1822 | 	case PERF_EVENT_STATE_INACTIVE: | 
 | 1823 | 		update_event_times(event); | 
 | 1824 | 		break; | 
 | 1825 |  | 
 | 1826 | 	default: | 
 | 1827 | 		break; | 
 | 1828 | 	} | 
 | 1829 |  | 
 | 1830 | 	/* | 
 | 1831 | 	 * In order to keep per-task stats reliable we need to flip the event | 
 | 1832 | 	 * values when we flip the contexts. | 
 | 1833 | 	 */ | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 1834 | 	value = local64_read(&next_event->count); | 
 | 1835 | 	value = local64_xchg(&event->count, value); | 
 | 1836 | 	local64_set(&next_event->count, value); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1837 |  | 
 | 1838 | 	swap(event->total_time_enabled, next_event->total_time_enabled); | 
 | 1839 | 	swap(event->total_time_running, next_event->total_time_running); | 
 | 1840 |  | 
 | 1841 | 	/* | 
 | 1842 | 	 * Since we swizzled the values, update the user visible data too. | 
 | 1843 | 	 */ | 
 | 1844 | 	perf_event_update_userpage(event); | 
 | 1845 | 	perf_event_update_userpage(next_event); | 
 | 1846 | } | 
 | 1847 |  | 
 | 1848 | #define list_next_entry(pos, member) \ | 
 | 1849 | 	list_entry(pos->member.next, typeof(*pos), member) | 
 | 1850 |  | 
 | 1851 | static void perf_event_sync_stat(struct perf_event_context *ctx, | 
 | 1852 | 				   struct perf_event_context *next_ctx) | 
 | 1853 | { | 
 | 1854 | 	struct perf_event *event, *next_event; | 
 | 1855 |  | 
 | 1856 | 	if (!ctx->nr_stat) | 
 | 1857 | 		return; | 
 | 1858 |  | 
| Peter Zijlstra | 02ffdbc | 2009-11-20 22:19:50 +0100 | [diff] [blame] | 1859 | 	update_context_time(ctx); | 
 | 1860 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1861 | 	event = list_first_entry(&ctx->event_list, | 
 | 1862 | 				   struct perf_event, event_entry); | 
 | 1863 |  | 
 | 1864 | 	next_event = list_first_entry(&next_ctx->event_list, | 
 | 1865 | 					struct perf_event, event_entry); | 
 | 1866 |  | 
 | 1867 | 	while (&event->event_entry != &ctx->event_list && | 
 | 1868 | 	       &next_event->event_entry != &next_ctx->event_list) { | 
 | 1869 |  | 
 | 1870 | 		__perf_event_sync_stat(event, next_event); | 
 | 1871 |  | 
 | 1872 | 		event = list_next_entry(event, event_entry); | 
 | 1873 | 		next_event = list_next_entry(next_event, event_entry); | 
 | 1874 | 	} | 
 | 1875 | } | 
 | 1876 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 1877 | static void perf_event_context_sched_out(struct task_struct *task, int ctxn, | 
 | 1878 | 					 struct task_struct *next) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1879 | { | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1880 | 	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1881 | 	struct perf_event_context *next_ctx; | 
 | 1882 | 	struct perf_event_context *parent; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1883 | 	struct perf_cpu_context *cpuctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1884 | 	int do_switch = 1; | 
 | 1885 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1886 | 	if (likely(!ctx)) | 
 | 1887 | 		return; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1888 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1889 | 	cpuctx = __get_cpu_context(ctx); | 
 | 1890 | 	if (!cpuctx->task_ctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1891 | 		return; | 
 | 1892 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1893 | 	rcu_read_lock(); | 
 | 1894 | 	parent = rcu_dereference(ctx->parent_ctx); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1895 | 	next_ctx = next->perf_event_ctxp[ctxn]; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1896 | 	if (parent && next_ctx && | 
 | 1897 | 	    rcu_dereference(next_ctx->parent_ctx) == parent) { | 
 | 1898 | 		/* | 
 | 1899 | 		 * Looks like the two contexts are clones, so we might be | 
 | 1900 | 		 * able to optimize the context switch.  We lock both | 
 | 1901 | 		 * contexts and check that they are clones under the | 
 | 1902 | 		 * lock (including re-checking that neither has been | 
 | 1903 | 		 * uncloned in the meantime).  It doesn't matter which | 
 | 1904 | 		 * order we take the locks because no other cpu could | 
 | 1905 | 		 * be trying to lock both of these tasks. | 
 | 1906 | 		 */ | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1907 | 		raw_spin_lock(&ctx->lock); | 
 | 1908 | 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1909 | 		if (context_equiv(ctx, next_ctx)) { | 
 | 1910 | 			/* | 
 | 1911 | 			 * XXX do we need a memory barrier of sorts | 
 | 1912 | 			 * wrt rcu_dereference() of perf_event_ctxp | 
 | 1913 | 			 */ | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1914 | 			task->perf_event_ctxp[ctxn] = next_ctx; | 
 | 1915 | 			next->perf_event_ctxp[ctxn] = ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1916 | 			ctx->task = next; | 
 | 1917 | 			next_ctx->task = task; | 
 | 1918 | 			do_switch = 0; | 
 | 1919 |  | 
 | 1920 | 			perf_event_sync_stat(ctx, next_ctx); | 
 | 1921 | 		} | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1922 | 		raw_spin_unlock(&next_ctx->lock); | 
 | 1923 | 		raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1924 | 	} | 
 | 1925 | 	rcu_read_unlock(); | 
 | 1926 |  | 
 | 1927 | 	if (do_switch) { | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1928 | 		ctx_sched_out(ctx, cpuctx, EVENT_ALL); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1929 | 		cpuctx->task_ctx = NULL; | 
 | 1930 | 	} | 
 | 1931 | } | 
 | 1932 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1933 | #define for_each_task_context_nr(ctxn)					\ | 
 | 1934 | 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) | 
 | 1935 |  | 
 | 1936 | /* | 
 | 1937 |  * Called from scheduler to remove the events of the current task, | 
 | 1938 |  * with interrupts disabled. | 
 | 1939 |  * | 
 | 1940 |  * We stop each event and update the event value in event->count. | 
 | 1941 |  * | 
 | 1942 |  * This does not protect us against NMI, but disable() | 
 | 1943 |  * sets the disabled bit in the control field of event _before_ | 
 | 1944 |  * accessing the event control register. If an NMI hits, then it will | 
 | 1945 |  * not restart the event. | 
 | 1946 |  */ | 
| Peter Zijlstra | 82cd6de | 2010-10-14 17:57:23 +0200 | [diff] [blame] | 1947 | void __perf_event_task_sched_out(struct task_struct *task, | 
 | 1948 | 				 struct task_struct *next) | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1949 | { | 
 | 1950 | 	int ctxn; | 
 | 1951 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1952 | 	for_each_task_context_nr(ctxn) | 
 | 1953 | 		perf_event_context_sched_out(task, ctxn, next); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1954 |  | 
 | 1955 | 	/* | 
 | 1956 | 	 * if cgroup events exist on this CPU, then we need | 
 | 1957 | 	 * to check if we have to switch out PMU state. | 
 | 1958 | 	 * cgroup events are system-wide mode only | 
 | 1959 | 	 */ | 
 | 1960 | 	if (atomic_read(&__get_cpu_var(perf_cgroup_events))) | 
 | 1961 | 		perf_cgroup_sched_out(task); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1962 | } | 
 | 1963 |  | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1964 | static void task_ctx_sched_out(struct perf_event_context *ctx, | 
 | 1965 | 			       enum event_type_t event_type) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1966 | { | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1967 | 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1968 |  | 
 | 1969 | 	if (!cpuctx->task_ctx) | 
 | 1970 | 		return; | 
 | 1971 |  | 
 | 1972 | 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) | 
 | 1973 | 		return; | 
 | 1974 |  | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1975 | 	ctx_sched_out(ctx, cpuctx, event_type); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1976 | 	cpuctx->task_ctx = NULL; | 
 | 1977 | } | 
 | 1978 |  | 
 | 1979 | /* | 
 | 1980 |  * Called with IRQs disabled | 
 | 1981 |  */ | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1982 | static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, | 
 | 1983 | 			      enum event_type_t event_type) | 
 | 1984 | { | 
 | 1985 | 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1986 | } | 
 | 1987 |  | 
 | 1988 | static void | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 1989 | ctx_pinned_sched_in(struct perf_event_context *ctx, | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 1990 | 		    struct perf_cpu_context *cpuctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1991 | { | 
 | 1992 | 	struct perf_event *event; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1993 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1994 | 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) { | 
 | 1995 | 		if (event->state <= PERF_EVENT_STATE_OFF) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1996 | 			continue; | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1997 | 		if (!event_filter_match(event)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1998 | 			continue; | 
 | 1999 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2000 | 		/* may need to reset tstamp_enabled */ | 
 | 2001 | 		if (is_cgroup_event(event)) | 
 | 2002 | 			perf_cgroup_mark_enabled(event, ctx); | 
 | 2003 |  | 
| Xiao Guangrong | 8c9ed8e | 2009-09-25 13:51:17 +0800 | [diff] [blame] | 2004 | 		if (group_can_go_on(event, cpuctx, 1)) | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2005 | 			group_sched_in(event, cpuctx, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2006 |  | 
 | 2007 | 		/* | 
 | 2008 | 		 * If this pinned group hasn't been scheduled, | 
 | 2009 | 		 * put it in error state. | 
 | 2010 | 		 */ | 
 | 2011 | 		if (event->state == PERF_EVENT_STATE_INACTIVE) { | 
 | 2012 | 			update_group_times(event); | 
 | 2013 | 			event->state = PERF_EVENT_STATE_ERROR; | 
 | 2014 | 		} | 
 | 2015 | 	} | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2016 | } | 
 | 2017 |  | 
 | 2018 | static void | 
 | 2019 | ctx_flexible_sched_in(struct perf_event_context *ctx, | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2020 | 		      struct perf_cpu_context *cpuctx) | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2021 | { | 
 | 2022 | 	struct perf_event *event; | 
 | 2023 | 	int can_add_hw = 1; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2024 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 2025 | 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) { | 
 | 2026 | 		/* Ignore events in OFF or ERROR state */ | 
 | 2027 | 		if (event->state <= PERF_EVENT_STATE_OFF) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2028 | 			continue; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2029 | 		/* | 
 | 2030 | 		 * Listen to the 'cpu' scheduling filter constraint | 
 | 2031 | 		 * of events: | 
 | 2032 | 		 */ | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 2033 | 		if (!event_filter_match(event)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2034 | 			continue; | 
 | 2035 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2036 | 		/* may need to reset tstamp_enabled */ | 
 | 2037 | 		if (is_cgroup_event(event)) | 
 | 2038 | 			perf_cgroup_mark_enabled(event, ctx); | 
 | 2039 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 2040 | 		if (group_can_go_on(event, cpuctx, can_add_hw)) { | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2041 | 			if (group_sched_in(event, cpuctx, ctx)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2042 | 				can_add_hw = 0; | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 2043 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2044 | 	} | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2045 | } | 
 | 2046 |  | 
 | 2047 | static void | 
 | 2048 | ctx_sched_in(struct perf_event_context *ctx, | 
 | 2049 | 	     struct perf_cpu_context *cpuctx, | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2050 | 	     enum event_type_t event_type, | 
 | 2051 | 	     struct task_struct *task) | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2052 | { | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2053 | 	u64 now; | 
 | 2054 |  | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2055 | 	raw_spin_lock(&ctx->lock); | 
 | 2056 | 	ctx->is_active = 1; | 
 | 2057 | 	if (likely(!ctx->nr_events)) | 
 | 2058 | 		goto out; | 
 | 2059 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2060 | 	now = perf_clock(); | 
 | 2061 | 	ctx->timestamp = now; | 
| Stephane Eranian | 3f7cce3 | 2011-02-18 14:40:01 +0200 | [diff] [blame] | 2062 | 	perf_cgroup_set_timestamp(task, ctx); | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2063 | 	/* | 
 | 2064 | 	 * First go through the list and put on any pinned groups | 
 | 2065 | 	 * in order to give them the best chance of going on. | 
 | 2066 | 	 */ | 
 | 2067 | 	if (event_type & EVENT_PINNED) | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2068 | 		ctx_pinned_sched_in(ctx, cpuctx); | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2069 |  | 
 | 2070 | 	/* Then walk through the lower prio flexible groups */ | 
 | 2071 | 	if (event_type & EVENT_FLEXIBLE) | 
| Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2072 | 		ctx_flexible_sched_in(ctx, cpuctx); | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2073 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 2074 | out: | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2075 | 	raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2076 | } | 
 | 2077 |  | 
| Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 2078 | static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2079 | 			     enum event_type_t event_type, | 
 | 2080 | 			     struct task_struct *task) | 
| Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 2081 | { | 
 | 2082 | 	struct perf_event_context *ctx = &cpuctx->ctx; | 
 | 2083 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2084 | 	ctx_sched_in(ctx, cpuctx, event_type, task); | 
| Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 2085 | } | 
 | 2086 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2087 | static void task_ctx_sched_in(struct perf_event_context *ctx, | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2088 | 			      enum event_type_t event_type) | 
 | 2089 | { | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2090 | 	struct perf_cpu_context *cpuctx; | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2091 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2092 | 	cpuctx = __get_cpu_context(ctx); | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2093 | 	if (cpuctx->task_ctx == ctx) | 
 | 2094 | 		return; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2095 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2096 | 	ctx_sched_in(ctx, cpuctx, event_type, NULL); | 
| Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 2097 | 	cpuctx->task_ctx = ctx; | 
 | 2098 | } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2099 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2100 | static void perf_event_context_sched_in(struct perf_event_context *ctx, | 
 | 2101 | 					struct task_struct *task) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2102 | { | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2103 | 	struct perf_cpu_context *cpuctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2104 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2105 | 	cpuctx = __get_cpu_context(ctx); | 
| Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 2106 | 	if (cpuctx->task_ctx == ctx) | 
 | 2107 | 		return; | 
 | 2108 |  | 
| Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 2109 | 	perf_pmu_disable(ctx->pmu); | 
| Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 2110 | 	/* | 
 | 2111 | 	 * We want to keep the following priority order: | 
 | 2112 | 	 * cpu pinned (which don't need to move), task pinned, | 
 | 2113 | 	 * cpu flexible, task flexible. | 
 | 2114 | 	 */ | 
 | 2115 | 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); | 
 | 2116 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2117 | 	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); | 
 | 2118 | 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); | 
 | 2119 | 	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); | 
| Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 2120 |  | 
 | 2121 | 	cpuctx->task_ctx = ctx; | 
| eranian@google.com | 9b33fa6 | 2010-03-10 22:26:05 -0800 | [diff] [blame] | 2122 |  | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2123 | 	/* | 
 | 2124 | 	 * Since these rotations are per-cpu, we need to ensure the | 
 | 2125 | 	 * cpu-context we got scheduled on is actually rotating. | 
 | 2126 | 	 */ | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2127 | 	perf_pmu_rotate_start(ctx->pmu); | 
| Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 2128 | 	perf_pmu_enable(ctx->pmu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2129 | } | 
 | 2130 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2131 | /* | 
 | 2132 |  * Called from scheduler to add the events of the current task | 
 | 2133 |  * with interrupts disabled. | 
 | 2134 |  * | 
 | 2135 |  * We restore the event value and then enable it. | 
 | 2136 |  * | 
 | 2137 |  * This does not protect us against NMI, but enable() | 
 | 2138 |  * sets the enabled bit in the control field of the event _before_ | 
 | 2139 |  * accessing the event control register. If an NMI hits, then it will | 
 | 2140 |  * keep the event running. | 
 | 2141 |  */ | 
| Peter Zijlstra | 82cd6de | 2010-10-14 17:57:23 +0200 | [diff] [blame] | 2142 | void __perf_event_task_sched_in(struct task_struct *task) | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2143 | { | 
 | 2144 | 	struct perf_event_context *ctx; | 
 | 2145 | 	int ctxn; | 
 | 2146 |  | 
 | 2147 | 	for_each_task_context_nr(ctxn) { | 
 | 2148 | 		ctx = task->perf_event_ctxp[ctxn]; | 
 | 2149 | 		if (likely(!ctx)) | 
 | 2150 | 			continue; | 
 | 2151 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2152 | 		perf_event_context_sched_in(ctx, task); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2153 | 	} | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2154 | 	/* | 
 | 2155 | 	 * If cgroup events exist on this CPU, then we need | 
 | 2156 | 	 * to check if we have to switch in PMU state. | 
 | 2157 | 	 * cgroup events are in system-wide mode only. | 
 | 2158 | 	 */ | 
 | 2159 | 	if (atomic_read(&__get_cpu_var(perf_cgroup_events))) | 
 | 2160 | 		perf_cgroup_sched_in(task); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2161 | } | 
 | 2162 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2163 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) | 
 | 2164 | { | 
 | 2165 | 	u64 frequency = event->attr.sample_freq; | 
 | 2166 | 	u64 sec = NSEC_PER_SEC; | 
 | 2167 | 	u64 divisor, dividend; | 
 | 2168 |  | 
 | 2169 | 	int count_fls, nsec_fls, frequency_fls, sec_fls; | 
 | 2170 |  | 
 | 2171 | 	count_fls = fls64(count); | 
 | 2172 | 	nsec_fls = fls64(nsec); | 
 | 2173 | 	frequency_fls = fls64(frequency); | 
 | 2174 | 	sec_fls = 30; | 
 | 2175 |  | 
 | 2176 | 	/* | 
 | 2177 | 	 * We got @count in @nsec, with a target of sample_freq HZ; | 
 | 2178 | 	 * the target period becomes: | 
 | 2179 | 	 * | 
 | 2180 | 	 *             @count * 10^9 | 
 | 2181 | 	 * period = ------------------- | 
 | 2182 | 	 *          @nsec * sample_freq | 
 | 2183 | 	 * | 
 | 2184 | 	 */ | 
 | 2185 |  | 
 | 2186 | 	/* | 
 | 2187 | 	 * Reduce accuracy by one bit such that @a and @b converge | 
 | 2188 | 	 * to a similar magnitude. | 
 | 2189 | 	 */ | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2190 | #define REDUCE_FLS(a, b)		\ | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2191 | do {					\ | 
 | 2192 | 	if (a##_fls > b##_fls) {	\ | 
 | 2193 | 		a >>= 1;		\ | 
 | 2194 | 		a##_fls--;		\ | 
 | 2195 | 	} else {			\ | 
 | 2196 | 		b >>= 1;		\ | 
 | 2197 | 		b##_fls--;		\ | 
 | 2198 | 	}				\ | 
 | 2199 | } while (0) | 
 | 2200 |  | 
 | 2201 | 	/* | 
 | 2202 | 	 * Reduce accuracy until either term fits in a u64, then proceed with | 
 | 2203 | 	 * the other, so that finally we can do a u64/u64 division. | 
 | 2204 | 	 */ | 
 | 2205 | 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { | 
 | 2206 | 		REDUCE_FLS(nsec, frequency); | 
 | 2207 | 		REDUCE_FLS(sec, count); | 
 | 2208 | 	} | 
 | 2209 |  | 
 | 2210 | 	if (count_fls + sec_fls > 64) { | 
 | 2211 | 		divisor = nsec * frequency; | 
 | 2212 |  | 
 | 2213 | 		while (count_fls + sec_fls > 64) { | 
 | 2214 | 			REDUCE_FLS(count, sec); | 
 | 2215 | 			divisor >>= 1; | 
 | 2216 | 		} | 
 | 2217 |  | 
 | 2218 | 		dividend = count * sec; | 
 | 2219 | 	} else { | 
 | 2220 | 		dividend = count * sec; | 
 | 2221 |  | 
 | 2222 | 		while (nsec_fls + frequency_fls > 64) { | 
 | 2223 | 			REDUCE_FLS(nsec, frequency); | 
 | 2224 | 			dividend >>= 1; | 
 | 2225 | 		} | 
 | 2226 |  | 
 | 2227 | 		divisor = nsec * frequency; | 
 | 2228 | 	} | 
 | 2229 |  | 
| Peter Zijlstra | f6ab91a | 2010-06-04 15:18:01 +0200 | [diff] [blame] | 2230 | 	if (!divisor) | 
 | 2231 | 		return dividend; | 
 | 2232 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2233 | 	return div64_u64(dividend, divisor); | 
 | 2234 | } | 
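As a point of comparison, the quantity the REDUCE_FLS() loop above approximates is exactly the ratio from the comment; the bit-shedding only exists because the kernel has no cheap 128-bit division. A minimal user-space sketch of that ratio (hypothetical names; assumes a gcc/clang compiler that provides unsigned __int128):

	/* Illustration only: the simple form of the target-period formula,
	 * without the fls64()-based precision reduction the kernel needs. */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t naive_calculate_period(uint64_t count, uint64_t nsec,
					       uint64_t sample_freq)
	{
		/* period = count * 10^9 / (nsec * sample_freq) */
		unsigned __int128 dividend = (unsigned __int128)count * 1000000000ULL;
		unsigned __int128 divisor  = (unsigned __int128)nsec * sample_freq;

		return divisor ? (uint64_t)(dividend / divisor) : (uint64_t)dividend;
	}

	int main(void)
	{
		/* 2,000,000 events in 10ms with a 1000 Hz sampling target
		 * gives a period of 200,000 events per sample. */
		printf("%llu\n", (unsigned long long)
		       naive_calculate_period(2000000, 10000000, 1000));
		return 0;
	}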
 | 2235 |  | 
 | 2236 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2237 | { | 
 | 2238 | 	struct hw_perf_event *hwc = &event->hw; | 
| Peter Zijlstra | f6ab91a | 2010-06-04 15:18:01 +0200 | [diff] [blame] | 2239 | 	s64 period, sample_period; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2240 | 	s64 delta; | 
 | 2241 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2242 | 	period = perf_calculate_period(event, nsec, count); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2243 |  | 
 | 2244 | 	delta = (s64)(period - hwc->sample_period); | 
 | 2245 | 	delta = (delta + 7) / 8; /* low pass filter */ | 
 | 2246 |  | 
 | 2247 | 	sample_period = hwc->sample_period + delta; | 
 | 2248 |  | 
 | 2249 | 	if (!sample_period) | 
 | 2250 | 		sample_period = 1; | 
 | 2251 |  | 
 | 2252 | 	hwc->sample_period = sample_period; | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2253 |  | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 2254 | 	if (local64_read(&hwc->period_left) > 8*sample_period) { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 2255 | 		event->pmu->stop(event, PERF_EF_UPDATE); | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 2256 | 		local64_set(&hwc->period_left, 0); | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 2257 | 		event->pmu->start(event, PERF_EF_RELOAD); | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2258 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2259 | } | 
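The (delta + 7) / 8 step acts as a low-pass filter: each invocation moves the sample period only one eighth of the way toward the newly calculated target, so a single noisy measurement cannot swing it wildly. A stand-alone, user-space sketch (hypothetical, illustration only) of how the period converges over successive ticks:

	/* Shows repeated (delta + 7) / 8 steps pulling sample_period
	 * toward a new target period. Not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		long long sample_period = 100000;	/* current period */
		long long target = 200000;		/* newly calculated period */
		int tick;

		for (tick = 1; tick <= 8; tick++) {
			long long delta = (target - sample_period + 7) / 8;

			sample_period += delta;
			printf("tick %d: sample_period = %lld\n", tick, sample_period);
		}
		return 0;
	}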
 | 2260 |  | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2261 | static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2262 | { | 
 | 2263 | 	struct perf_event *event; | 
 | 2264 | 	struct hw_perf_event *hwc; | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2265 | 	u64 interrupts, now; | 
 | 2266 | 	s64 delta; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2267 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2268 | 	raw_spin_lock(&ctx->lock); | 
| Paul Mackerras | 03541f8 | 2009-10-14 16:58:03 +1100 | [diff] [blame] | 2269 | 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2270 | 		if (event->state != PERF_EVENT_STATE_ACTIVE) | 
 | 2271 | 			continue; | 
 | 2272 |  | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 2273 | 		if (!event_filter_match(event)) | 
| Peter Zijlstra | 5d27c23 | 2009-12-17 13:16:32 +0100 | [diff] [blame] | 2274 | 			continue; | 
 | 2275 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2276 | 		hwc = &event->hw; | 
 | 2277 |  | 
 | 2278 | 		interrupts = hwc->interrupts; | 
 | 2279 | 		hwc->interrupts = 0; | 
 | 2280 |  | 
 | 2281 | 		/* | 
 | 2282 | 		 * unthrottle events on the tick | 
 | 2283 | 		 */ | 
 | 2284 | 		if (interrupts == MAX_INTERRUPTS) { | 
 | 2285 | 			perf_log_throttle(event, 1); | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 2286 | 			event->pmu->start(event, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2287 | 		} | 
 | 2288 |  | 
 | 2289 | 		if (!event->attr.freq || !event->attr.sample_freq) | 
 | 2290 | 			continue; | 
 | 2291 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2292 | 		event->pmu->read(event); | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 2293 | 		now = local64_read(&event->count); | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2294 | 		delta = now - hwc->freq_count_stamp; | 
 | 2295 | 		hwc->freq_count_stamp = now; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2296 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 2297 | 		if (delta > 0) | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2298 | 			perf_adjust_period(event, period, delta); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2299 | 	} | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2300 | 	raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2301 | } | 
 | 2302 |  | 
 | 2303 | /* | 
 | 2304 |  * Round-robin a context's events: | 
 | 2305 |  */ | 
 | 2306 | static void rotate_ctx(struct perf_event_context *ctx) | 
 | 2307 | { | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2308 | 	raw_spin_lock(&ctx->lock); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 2309 |  | 
| Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 2310 | 	/* | 
 | 2311 | 	 * Rotate the non-pinned groups, moving the first entry to the end. | 
 | 2312 | 	 * Rotation might be disabled by the inheritance code. | 
 | 2313 | 	 */ | 
 | 2314 | 	if (!ctx->rotate_disable) | 
 | 2315 | 		list_rotate_left(&ctx->flexible_groups); | 
| Frederic Weisbecker | e286417 | 2010-01-09 21:05:28 +0100 | [diff] [blame] | 2316 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2317 | 	raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2318 | } | 
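list_rotate_left() sends the current head of the flexible list to the tail, so a different group gets first claim on the over-committed PMU at the next rotation. A toy sketch of that round-robin order, using a plain array in place of the kernel list (illustration only):

	/* After each "tick" the former head goes to the back, so every
	 * group eventually reaches the front of the scheduling order. */
	#include <stdio.h>

	int main(void)
	{
		char groups[3] = { 'A', 'B', 'C' };
		int tick, i;

		for (tick = 0; tick < 3; tick++) {
			char first = groups[0];

			printf("tick %d order:", tick);
			for (i = 0; i < 3; i++)
				printf(" %c", groups[i]);
			printf("\n");

			/* rotate left: move the head to the tail */
			for (i = 0; i < 2; i++)
				groups[i] = groups[i + 1];
			groups[2] = first;
		}
		return 0;
	}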
 | 2319 |  | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2320 | /* | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2321 |  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized | 
 | 2322 |  * because they're strictly cpu affine and rotate_start is called with IRQs | 
 | 2323 |  * disabled, while rotate_context is called from IRQ context. | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2324 |  */ | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2325 | static void perf_rotate_context(struct perf_cpu_context *cpuctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2326 | { | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2327 | 	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2328 | 	struct perf_event_context *ctx = NULL; | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2329 | 	int rotate = 0, remove = 1; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2330 |  | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2331 | 	if (cpuctx->ctx.nr_events) { | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2332 | 		remove = 0; | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2333 | 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) | 
 | 2334 | 			rotate = 1; | 
 | 2335 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2336 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2337 | 	ctx = cpuctx->task_ctx; | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2338 | 	if (ctx && ctx->nr_events) { | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2339 | 		remove = 0; | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2340 | 		if (ctx->nr_events != ctx->nr_active) | 
 | 2341 | 			rotate = 1; | 
 | 2342 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2343 |  | 
| Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 2344 | 	perf_pmu_disable(cpuctx->ctx.pmu); | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2345 | 	perf_ctx_adjust_freq(&cpuctx->ctx, interval); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2346 | 	if (ctx) | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2347 | 		perf_ctx_adjust_freq(ctx, interval); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2348 |  | 
| Peter Zijlstra | d4944a0 | 2010-03-08 13:51:20 +0100 | [diff] [blame] | 2349 | 	if (!rotate) | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2350 | 		goto done; | 
| Peter Zijlstra | d4944a0 | 2010-03-08 13:51:20 +0100 | [diff] [blame] | 2351 |  | 
| Frederic Weisbecker | 7defb0f | 2010-01-17 12:15:31 +0100 | [diff] [blame] | 2352 | 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2353 | 	if (ctx) | 
| Frederic Weisbecker | 7defb0f | 2010-01-17 12:15:31 +0100 | [diff] [blame] | 2354 | 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2355 |  | 
 | 2356 | 	rotate_ctx(&cpuctx->ctx); | 
 | 2357 | 	if (ctx) | 
 | 2358 | 		rotate_ctx(ctx); | 
 | 2359 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2360 | 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2361 | 	if (ctx) | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2362 | 		task_ctx_sched_in(ctx, EVENT_FLEXIBLE); | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2363 |  | 
 | 2364 | done: | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2365 | 	if (remove) | 
 | 2366 | 		list_del_init(&cpuctx->rotation_list); | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 2367 |  | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 2368 | 	perf_pmu_enable(cpuctx->ctx.pmu); | 
 | 2369 | } | 
 | 2370 |  | 
 | 2371 | void perf_event_task_tick(void) | 
 | 2372 | { | 
 | 2373 | 	struct list_head *head = &__get_cpu_var(rotation_list); | 
 | 2374 | 	struct perf_cpu_context *cpuctx, *tmp; | 
 | 2375 |  | 
 | 2376 | 	WARN_ON(!irqs_disabled()); | 
 | 2377 |  | 
 | 2378 | 	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { | 
 | 2379 | 		if (cpuctx->jiffies_interval == 1 || | 
 | 2380 | 				!(jiffies % cpuctx->jiffies_interval)) | 
 | 2381 | 			perf_rotate_context(cpuctx); | 
 | 2382 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2383 | } | 
 | 2384 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 2385 | static int event_enable_on_exec(struct perf_event *event, | 
 | 2386 | 				struct perf_event_context *ctx) | 
 | 2387 | { | 
 | 2388 | 	if (!event->attr.enable_on_exec) | 
 | 2389 | 		return 0; | 
 | 2390 |  | 
 | 2391 | 	event->attr.enable_on_exec = 0; | 
 | 2392 | 	if (event->state >= PERF_EVENT_STATE_INACTIVE) | 
 | 2393 | 		return 0; | 
 | 2394 |  | 
 | 2395 | 	__perf_event_mark_enabled(event, ctx); | 
 | 2396 |  | 
 | 2397 | 	return 1; | 
 | 2398 | } | 
 | 2399 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2400 | /* | 
 | 2401 |  * Enable all of a task's events that have been marked enable-on-exec. | 
 | 2402 |  * This expects task == current. | 
 | 2403 |  */ | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2404 | static void perf_event_enable_on_exec(struct perf_event_context *ctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2405 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2406 | 	struct perf_event *event; | 
 | 2407 | 	unsigned long flags; | 
 | 2408 | 	int enabled = 0; | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 2409 | 	int ret; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2410 |  | 
 | 2411 | 	local_irq_save(flags); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2412 | 	if (!ctx || !ctx->nr_events) | 
 | 2413 | 		goto out; | 
 | 2414 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2415 | 	task_ctx_sched_out(ctx, EVENT_ALL); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2416 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2417 | 	raw_spin_lock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2418 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 2419 | 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) { | 
 | 2420 | 		ret = event_enable_on_exec(event, ctx); | 
 | 2421 | 		if (ret) | 
 | 2422 | 			enabled = 1; | 
 | 2423 | 	} | 
 | 2424 |  | 
 | 2425 | 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) { | 
 | 2426 | 		ret = event_enable_on_exec(event, ctx); | 
 | 2427 | 		if (ret) | 
 | 2428 | 			enabled = 1; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2429 | 	} | 
 | 2430 |  | 
 | 2431 | 	/* | 
 | 2432 | 	 * Unclone this context if we enabled any event. | 
 | 2433 | 	 */ | 
 | 2434 | 	if (enabled) | 
 | 2435 | 		unclone_ctx(ctx); | 
 | 2436 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2437 | 	raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2438 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2439 | 	perf_event_context_sched_in(ctx, ctx->task); | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 2440 | out: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2441 | 	local_irq_restore(flags); | 
 | 2442 | } | 
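From user space this path is driven by the enable_on_exec bit in struct perf_event_attr: a counter opened disabled before exec() is switched on by the function above once the new image starts. A hedged sketch of that usage (error handling omitted; illustration only):

	/* Open a disabled counter that the kernel enables on exec(). */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;		/* start off ... */
		attr.enable_on_exec = 1;	/* ... until we exec() */

		/* pid = 0 (this task), cpu = -1 (any), no group, no flags */
		syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

		/* counting starts when the kernel sees the exec */
		execlp("true", "true", (char *)NULL);
		return 1;
	}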
 | 2443 |  | 
 | 2444 | /* | 
 | 2445 |  * Cross CPU call to read the hardware event | 
 | 2446 |  */ | 
 | 2447 | static void __perf_event_read(void *info) | 
 | 2448 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2449 | 	struct perf_event *event = info; | 
 | 2450 | 	struct perf_event_context *ctx = event->ctx; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2451 | 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2452 |  | 
 | 2453 | 	/* | 
 | 2454 | 	 * If this is a task context, we need to check whether it is | 
 | 2455 | 	 * the current task context of this cpu.  If not, it has been | 
 | 2456 | 	 * scheduled out before the smp call arrived.  In that case | 
 | 2457 | 	 * event->count would have been updated to a recent sample | 
 | 2458 | 	 * when the event was scheduled out. | 
 | 2459 | 	 */ | 
 | 2460 | 	if (ctx->task && cpuctx->task_ctx != ctx) | 
 | 2461 | 		return; | 
 | 2462 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2463 | 	raw_spin_lock(&ctx->lock); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2464 | 	if (ctx->is_active) { | 
| Peter Zijlstra | 542e72f | 2011-01-26 15:38:35 +0100 | [diff] [blame] | 2465 | 		update_context_time(ctx); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2466 | 		update_cgrp_time_from_event(event); | 
 | 2467 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2468 | 	update_event_times(event); | 
| Peter Zijlstra | 542e72f | 2011-01-26 15:38:35 +0100 | [diff] [blame] | 2469 | 	if (event->state == PERF_EVENT_STATE_ACTIVE) | 
 | 2470 | 		event->pmu->read(event); | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2471 | 	raw_spin_unlock(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2472 | } | 
 | 2473 |  | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 2474 | static inline u64 perf_event_count(struct perf_event *event) | 
 | 2475 | { | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 2476 | 	return local64_read(&event->count) + atomic64_read(&event->child_count); | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 2477 | } | 
 | 2478 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2479 | static u64 perf_event_read(struct perf_event *event) | 
 | 2480 | { | 
 | 2481 | 	/* | 
 | 2482 | 	 * If the event is enabled and currently active on a CPU, update the | 
 | 2483 | 	 * value in the event structure: | 
 | 2484 | 	 */ | 
 | 2485 | 	if (event->state == PERF_EVENT_STATE_ACTIVE) { | 
 | 2486 | 		smp_call_function_single(event->oncpu, | 
 | 2487 | 					 __perf_event_read, event, 1); | 
 | 2488 | 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) { | 
| Peter Zijlstra | 2b8988c | 2009-11-20 22:19:54 +0100 | [diff] [blame] | 2489 | 		struct perf_event_context *ctx = event->ctx; | 
 | 2490 | 		unsigned long flags; | 
 | 2491 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2492 | 		raw_spin_lock_irqsave(&ctx->lock, flags); | 
| Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 2493 | 		/* | 
 | 2494 | 		 * We may read while the context is not active | 
 | 2495 | 		 * (e.g., the thread is blocked); in that case | 
 | 2496 | 		 * we cannot update the context time. | 
 | 2497 | 		 */ | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2498 | 		if (ctx->is_active) { | 
| Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 2499 | 			update_context_time(ctx); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2500 | 			update_cgrp_time_from_event(event); | 
 | 2501 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2502 | 		update_event_times(event); | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2503 | 		raw_spin_unlock_irqrestore(&ctx->lock, flags); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2504 | 	} | 
 | 2505 |  | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 2506 | 	return perf_event_count(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2507 | } | 
 | 2508 |  | 
 | 2509 | /* | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 2510 |  * Callchain support | 
 | 2511 |  */ | 
 | 2512 |  | 
 | 2513 | struct callchain_cpus_entries { | 
 | 2514 | 	struct rcu_head			rcu_head; | 
 | 2515 | 	struct perf_callchain_entry	*cpu_entries[0]; | 
 | 2516 | }; | 
 | 2517 |  | 
| Frederic Weisbecker | 7ae07ea | 2010-08-14 20:45:13 +0200 | [diff] [blame] | 2518 | static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 2519 | static atomic_t nr_callchain_events; | 
 | 2520 | static DEFINE_MUTEX(callchain_mutex); | 
 | 2521 | struct callchain_cpus_entries *callchain_cpus_entries; | 
 | 2522 |  | 
 | 2523 |  | 
 | 2524 | __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, | 
 | 2525 | 				  struct pt_regs *regs) | 
 | 2526 | { | 
 | 2527 | } | 
 | 2528 |  | 
 | 2529 | __weak void perf_callchain_user(struct perf_callchain_entry *entry, | 
 | 2530 | 				struct pt_regs *regs) | 
 | 2531 | { | 
 | 2532 | } | 
 | 2533 |  | 
 | 2534 | static void release_callchain_buffers_rcu(struct rcu_head *head) | 
 | 2535 | { | 
 | 2536 | 	struct callchain_cpus_entries *entries; | 
 | 2537 | 	int cpu; | 
 | 2538 |  | 
 | 2539 | 	entries = container_of(head, struct callchain_cpus_entries, rcu_head); | 
 | 2540 |  | 
 | 2541 | 	for_each_possible_cpu(cpu) | 
 | 2542 | 		kfree(entries->cpu_entries[cpu]); | 
 | 2543 |  | 
 | 2544 | 	kfree(entries); | 
 | 2545 | } | 
 | 2546 |  | 
 | 2547 | static void release_callchain_buffers(void) | 
 | 2548 | { | 
 | 2549 | 	struct callchain_cpus_entries *entries; | 
 | 2550 |  | 
 | 2551 | 	entries = callchain_cpus_entries; | 
 | 2552 | 	rcu_assign_pointer(callchain_cpus_entries, NULL); | 
 | 2553 | 	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); | 
 | 2554 | } | 
 | 2555 |  | 
 | 2556 | static int alloc_callchain_buffers(void) | 
 | 2557 | { | 
 | 2558 | 	int cpu; | 
 | 2559 | 	int size; | 
 | 2560 | 	struct callchain_cpus_entries *entries; | 
 | 2561 |  | 
 | 2562 | 	/* | 
 | 2563 | 	 * We can't use the percpu allocation API for data that can be | 
 | 2564 | 	 * accessed from NMI. Use a temporary manual per cpu allocation | 
 | 2565 | 	 * until that gets sorted out. | 
 | 2566 | 	 */ | 
| Eric Dumazet | 88d4f0d | 2011-01-25 19:40:51 +0100 | [diff] [blame] | 2567 | 	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 2568 |  | 
 | 2569 | 	entries = kzalloc(size, GFP_KERNEL); | 
 | 2570 | 	if (!entries) | 
 | 2571 | 		return -ENOMEM; | 
 | 2572 |  | 
| Frederic Weisbecker | 7ae07ea | 2010-08-14 20:45:13 +0200 | [diff] [blame] | 2573 | 	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 2574 |  | 
 | 2575 | 	for_each_possible_cpu(cpu) { | 
 | 2576 | 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, | 
 | 2577 | 							 cpu_to_node(cpu)); | 
 | 2578 | 		if (!entries->cpu_entries[cpu]) | 
 | 2579 | 			goto fail; | 
 | 2580 | 	} | 
 | 2581 |  | 
 | 2582 | 	rcu_assign_pointer(callchain_cpus_entries, entries); | 
 | 2583 |  | 
 | 2584 | 	return 0; | 
 | 2585 |  | 
 | 2586 | fail: | 
 | 2587 | 	for_each_possible_cpu(cpu) | 
 | 2588 | 		kfree(entries->cpu_entries[cpu]); | 
 | 2589 | 	kfree(entries); | 
 | 2590 |  | 
 | 2591 | 	return -ENOMEM; | 
 | 2592 | } | 
 | 2593 |  | 
 | 2594 | static int get_callchain_buffers(void) | 
 | 2595 | { | 
 | 2596 | 	int err = 0; | 
 | 2597 | 	int count; | 
 | 2598 |  | 
 | 2599 | 	mutex_lock(&callchain_mutex); | 
 | 2600 |  | 
 | 2601 | 	count = atomic_inc_return(&nr_callchain_events); | 
 | 2602 | 	if (WARN_ON_ONCE(count < 1)) { | 
 | 2603 | 		err = -EINVAL; | 
 | 2604 | 		goto exit; | 
 | 2605 | 	} | 
 | 2606 |  | 
 | 2607 | 	if (count > 1) { | 
 | 2608 | 		/* If the allocation failed, give up */ | 
 | 2609 | 		if (!callchain_cpus_entries) | 
 | 2610 | 			err = -ENOMEM; | 
 | 2611 | 		goto exit; | 
 | 2612 | 	} | 
 | 2613 |  | 
 | 2614 | 	err = alloc_callchain_buffers(); | 
 | 2615 | 	if (err) | 
 | 2616 | 		release_callchain_buffers(); | 
 | 2617 | exit: | 
 | 2618 | 	mutex_unlock(&callchain_mutex); | 
 | 2619 |  | 
 | 2620 | 	return err; | 
 | 2621 | } | 
 | 2622 |  | 
 | 2623 | static void put_callchain_buffers(void) | 
 | 2624 | { | 
 | 2625 | 	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { | 
 | 2626 | 		release_callchain_buffers(); | 
 | 2627 | 		mutex_unlock(&callchain_mutex); | 
 | 2628 | 	} | 
 | 2629 | } | 
 | 2630 |  | 
 | 2631 | static int get_recursion_context(int *recursion) | 
 | 2632 | { | 
 | 2633 | 	int rctx; | 
 | 2634 |  | 
 | 2635 | 	if (in_nmi()) | 
 | 2636 | 		rctx = 3; | 
 | 2637 | 	else if (in_irq()) | 
 | 2638 | 		rctx = 2; | 
 | 2639 | 	else if (in_softirq()) | 
 | 2640 | 		rctx = 1; | 
 | 2641 | 	else | 
 | 2642 | 		rctx = 0; | 
 | 2643 |  | 
 | 2644 | 	if (recursion[rctx]) | 
 | 2645 | 		return -1; | 
 | 2646 |  | 
 | 2647 | 	recursion[rctx]++; | 
 | 2648 | 	barrier(); | 
 | 2649 |  | 
 | 2650 | 	return rctx; | 
 | 2651 | } | 
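The four slots correspond to task, softirq, hardirq and NMI context; the per-slot flag rejects re-entry at the same level so a nested sample cannot overwrite a buffer that is still in use. A stripped-down, single-CPU user-space analogue of the get/put pairing (hypothetical names, no atomicity concerns here):

	/* Toy analogue of get/put_recursion_context() on one CPU: each
	 * context level owns one flag; taking a level twice fails. */
	#include <stdio.h>

	static int recursion[4];		/* task, softirq, irq, nmi */

	static int get_ctx(int level)
	{
		if (recursion[level])
			return -1;		/* already sampling at this level */
		recursion[level]++;
		return level;
	}

	static void put_ctx(int level)
	{
		recursion[level]--;
	}

	int main(void)
	{
		int a = get_ctx(0);		/* task-level sample: ok */
		int b = get_ctx(0);		/* nested task-level sample: rejected */
		int c = get_ctx(2);		/* hardirq sample nests fine */

		printf("%d %d %d\n", a, b, c);	/* prints: 0 -1 2 */
		if (c >= 0) put_ctx(c);
		if (a >= 0) put_ctx(a);
		return 0;
	}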
 | 2652 |  | 
 | 2653 | static inline void put_recursion_context(int *recursion, int rctx) | 
 | 2654 | { | 
 | 2655 | 	barrier(); | 
 | 2656 | 	recursion[rctx]--; | 
 | 2657 | } | 
 | 2658 |  | 
 | 2659 | static struct perf_callchain_entry *get_callchain_entry(int *rctx) | 
 | 2660 | { | 
 | 2661 | 	int cpu; | 
 | 2662 | 	struct callchain_cpus_entries *entries; | 
 | 2663 |  | 
 | 2664 | 	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); | 
 | 2665 | 	if (*rctx == -1) | 
 | 2666 | 		return NULL; | 
 | 2667 |  | 
 | 2668 | 	entries = rcu_dereference(callchain_cpus_entries); | 
 | 2669 | 	if (!entries) | 
 | 2670 | 		return NULL; | 
 | 2671 |  | 
 | 2672 | 	cpu = smp_processor_id(); | 
 | 2673 |  | 
 | 2674 | 	return &entries->cpu_entries[cpu][*rctx]; | 
 | 2675 | } | 
 | 2676 |  | 
 | 2677 | static void | 
 | 2678 | put_callchain_entry(int rctx) | 
 | 2679 | { | 
 | 2680 | 	put_recursion_context(__get_cpu_var(callchain_recursion), rctx); | 
 | 2681 | } | 
 | 2682 |  | 
 | 2683 | static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | 
 | 2684 | { | 
 | 2685 | 	int rctx; | 
 | 2686 | 	struct perf_callchain_entry *entry; | 
 | 2687 |  | 
 | 2688 |  | 
 | 2689 | 	entry = get_callchain_entry(&rctx); | 
 | 2690 | 	if (rctx == -1) | 
 | 2691 | 		return NULL; | 
 | 2692 |  | 
 | 2693 | 	if (!entry) | 
 | 2694 | 		goto exit_put; | 
 | 2695 |  | 
 | 2696 | 	entry->nr = 0; | 
 | 2697 |  | 
 | 2698 | 	if (!user_mode(regs)) { | 
 | 2699 | 		perf_callchain_store(entry, PERF_CONTEXT_KERNEL); | 
 | 2700 | 		perf_callchain_kernel(entry, regs); | 
 | 2701 | 		if (current->mm) | 
 | 2702 | 			regs = task_pt_regs(current); | 
 | 2703 | 		else | 
 | 2704 | 			regs = NULL; | 
 | 2705 | 	} | 
 | 2706 |  | 
 | 2707 | 	if (regs) { | 
 | 2708 | 		perf_callchain_store(entry, PERF_CONTEXT_USER); | 
 | 2709 | 		perf_callchain_user(entry, regs); | 
 | 2710 | 	} | 
 | 2711 |  | 
 | 2712 | exit_put: | 
 | 2713 | 	put_callchain_entry(rctx); | 
 | 2714 |  | 
 | 2715 | 	return entry; | 
 | 2716 | } | 
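Consumers of the resulting sample see a flat array of instruction pointers in which PERF_CONTEXT_KERNEL and PERF_CONTEXT_USER act as separators, mirroring the two perf_callchain_store() calls above. A hedged sketch of how a reader might walk such an entry (simplified types; the marker constants come from linux/perf_event.h):

	/* Split a callchain into kernel and user frames via the context
	 * markers stored by perf_callchain(). Illustration only. */
	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>

	static void walk_callchain(const uint64_t *ips, uint64_t nr)
	{
		const char *where = "unknown";
		uint64_t i;

		for (i = 0; i < nr; i++) {
			if (ips[i] == PERF_CONTEXT_KERNEL) {
				where = "kernel";
				continue;
			}
			if (ips[i] == PERF_CONTEXT_USER) {
				where = "user";
				continue;
			}
			printf("%s ip: %#llx\n", where, (unsigned long long)ips[i]);
		}
	}

	int main(void)
	{
		const uint64_t ips[] = { PERF_CONTEXT_KERNEL, 0xffffffff81000010ULL,
					 PERF_CONTEXT_USER,   0x400123ULL };

		walk_callchain(ips, 4);
		return 0;
	}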
 | 2717 |  | 
 | 2718 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2719 |  * Initialize the perf_event context in a task_struct: | 
 | 2720 |  */ | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2721 | static void __perf_event_init_context(struct perf_event_context *ctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2722 | { | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2723 | 	raw_spin_lock_init(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2724 | 	mutex_init(&ctx->mutex); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 2725 | 	INIT_LIST_HEAD(&ctx->pinned_groups); | 
 | 2726 | 	INIT_LIST_HEAD(&ctx->flexible_groups); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2727 | 	INIT_LIST_HEAD(&ctx->event_list); | 
 | 2728 | 	atomic_set(&ctx->refcount, 1); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2729 | } | 
 | 2730 |  | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2731 | static struct perf_event_context * | 
 | 2732 | alloc_perf_context(struct pmu *pmu, struct task_struct *task) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2733 | { | 
 | 2734 | 	struct perf_event_context *ctx; | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2735 |  | 
 | 2736 | 	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); | 
 | 2737 | 	if (!ctx) | 
 | 2738 | 		return NULL; | 
 | 2739 |  | 
 | 2740 | 	__perf_event_init_context(ctx); | 
 | 2741 | 	if (task) { | 
 | 2742 | 		ctx->task = task; | 
 | 2743 | 		get_task_struct(task); | 
 | 2744 | 	} | 
 | 2745 | 	ctx->pmu = pmu; | 
 | 2746 |  | 
 | 2747 | 	return ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2748 | } | 
 | 2749 |  | 
| Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 2750 | static struct task_struct * | 
 | 2751 | find_lively_task_by_vpid(pid_t vpid) | 
 | 2752 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2753 | 	struct task_struct *task; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2754 | 	int err; | 
 | 2755 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2756 | 	rcu_read_lock(); | 
| Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 2757 | 	if (!vpid) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2758 | 		task = current; | 
 | 2759 | 	else | 
| Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 2760 | 		task = find_task_by_vpid(vpid); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2761 | 	if (task) | 
 | 2762 | 		get_task_struct(task); | 
 | 2763 | 	rcu_read_unlock(); | 
 | 2764 |  | 
 | 2765 | 	if (!task) | 
 | 2766 | 		return ERR_PTR(-ESRCH); | 
 | 2767 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2768 | 	/* Reuse ptrace permission checks for now. */ | 
 | 2769 | 	err = -EACCES; | 
 | 2770 | 	if (!ptrace_may_access(task, PTRACE_MODE_READ)) | 
 | 2771 | 		goto errout; | 
 | 2772 |  | 
| Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 2773 | 	return task; | 
 | 2774 | errout: | 
 | 2775 | 	put_task_struct(task); | 
 | 2776 | 	return ERR_PTR(err); | 
 | 2777 |  | 
 | 2778 | } | 
 | 2779 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2780 | /* | 
 | 2781 |  * Returns a matching context with its refcount and pincount incremented. | 
 | 2782 |  */ | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2783 | static struct perf_event_context * | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 2784 | find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2785 | { | 
 | 2786 | 	struct perf_event_context *ctx; | 
 | 2787 | 	struct perf_cpu_context *cpuctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2788 | 	unsigned long flags; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2789 | 	int ctxn, err; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2790 |  | 
| Oleg Nesterov | 22a4ec7 | 2011-01-18 17:10:08 +0100 | [diff] [blame] | 2791 | 	if (!task) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2792 | 		/* Must be root to operate on a CPU event: */ | 
 | 2793 | 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | 
 | 2794 | 			return ERR_PTR(-EACCES); | 
 | 2795 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2796 | 		/* | 
 | 2797 | 		 * We could be clever and allow attaching an event to an | 
 | 2798 | 		 * offline CPU and activate it when the CPU comes up, but | 
 | 2799 | 		 * that's for later. | 
 | 2800 | 		 */ | 
 | 2801 | 		if (!cpu_online(cpu)) | 
 | 2802 | 			return ERR_PTR(-ENODEV); | 
 | 2803 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2804 | 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2805 | 		ctx = &cpuctx->ctx; | 
 | 2806 | 		get_ctx(ctx); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2807 | 		++ctx->pin_count; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2808 |  | 
 | 2809 | 		return ctx; | 
 | 2810 | 	} | 
 | 2811 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2812 | 	err = -EINVAL; | 
 | 2813 | 	ctxn = pmu->task_ctx_nr; | 
 | 2814 | 	if (ctxn < 0) | 
 | 2815 | 		goto errout; | 
 | 2816 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 2817 | retry: | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 2818 | 	ctx = perf_lock_task_context(task, ctxn, &flags); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2819 | 	if (ctx) { | 
 | 2820 | 		unclone_ctx(ctx); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2821 | 		++ctx->pin_count; | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2822 | 		raw_spin_unlock_irqrestore(&ctx->lock, flags); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2823 | 	} | 
 | 2824 |  | 
 | 2825 | 	if (!ctx) { | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2826 | 		ctx = alloc_perf_context(pmu, task); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2827 | 		err = -ENOMEM; | 
 | 2828 | 		if (!ctx) | 
 | 2829 | 			goto errout; | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2830 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2831 | 		get_ctx(ctx); | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2832 |  | 
| Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 2833 | 		err = 0; | 
 | 2834 | 		mutex_lock(&task->perf_event_mutex); | 
 | 2835 | 		/* | 
 | 2836 | 		 * If it has already passed perf_event_exit_task(), | 
 | 2837 | 		 * we must see PF_EXITING; it takes this mutex too. | 
 | 2838 | 		 */ | 
 | 2839 | 		if (task->flags & PF_EXITING) | 
 | 2840 | 			err = -ESRCH; | 
 | 2841 | 		else if (task->perf_event_ctxp[ctxn]) | 
 | 2842 | 			err = -EAGAIN; | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2843 | 		else { | 
 | 2844 | 			++ctx->pin_count; | 
| Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 2845 | 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2846 | 		} | 
| Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 2847 | 		mutex_unlock(&task->perf_event_mutex); | 
 | 2848 |  | 
 | 2849 | 		if (unlikely(err)) { | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 2850 | 			put_task_struct(task); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2851 | 			kfree(ctx); | 
| Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 2852 |  | 
 | 2853 | 			if (err == -EAGAIN) | 
 | 2854 | 				goto retry; | 
 | 2855 | 			goto errout; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2856 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2857 | 	} | 
 | 2858 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2859 | 	return ctx; | 
 | 2860 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 2861 | errout: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2862 | 	return ERR_PTR(err); | 
 | 2863 | } | 
 | 2864 |  | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 2865 | static void perf_event_free_filter(struct perf_event *event); | 
 | 2866 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2867 | static void free_event_rcu(struct rcu_head *head) | 
 | 2868 | { | 
 | 2869 | 	struct perf_event *event; | 
 | 2870 |  | 
 | 2871 | 	event = container_of(head, struct perf_event, rcu_head); | 
 | 2872 | 	if (event->ns) | 
 | 2873 | 		put_pid_ns(event->ns); | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 2874 | 	perf_event_free_filter(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2875 | 	kfree(event); | 
 | 2876 | } | 
 | 2877 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 2878 | static void perf_buffer_put(struct perf_buffer *buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2879 |  | 
 | 2880 | static void free_event(struct perf_event *event) | 
 | 2881 | { | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 2882 | 	irq_work_sync(&event->pending); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2883 |  | 
 | 2884 | 	if (!event->parent) { | 
| Peter Zijlstra | 82cd6de | 2010-10-14 17:57:23 +0200 | [diff] [blame] | 2885 | 		if (event->attach_state & PERF_ATTACH_TASK) | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2886 | 			jump_label_dec(&perf_sched_events); | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 2887 | 		if (event->attr.mmap || event->attr.mmap_data) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2888 | 			atomic_dec(&nr_mmap_events); | 
 | 2889 | 		if (event->attr.comm) | 
 | 2890 | 			atomic_dec(&nr_comm_events); | 
 | 2891 | 		if (event->attr.task) | 
 | 2892 | 			atomic_dec(&nr_task_events); | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 2893 | 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) | 
 | 2894 | 			put_callchain_buffers(); | 
| Peter Zijlstra | 0830937 | 2011-03-03 11:31:20 +0100 | [diff] [blame] | 2895 | 		if (is_cgroup_event(event)) { | 
 | 2896 | 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); | 
 | 2897 | 			jump_label_dec(&perf_sched_events); | 
 | 2898 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2899 | 	} | 
 | 2900 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 2901 | 	if (event->buffer) { | 
 | 2902 | 		perf_buffer_put(event->buffer); | 
 | 2903 | 		event->buffer = NULL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2904 | 	} | 
 | 2905 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2906 | 	if (is_cgroup_event(event)) | 
 | 2907 | 		perf_detach_cgroup(event); | 
 | 2908 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2909 | 	if (event->destroy) | 
 | 2910 | 		event->destroy(event); | 
 | 2911 |  | 
| Peter Zijlstra | 0c67b40 | 2010-09-13 11:15:58 +0200 | [diff] [blame] | 2912 | 	if (event->ctx) | 
 | 2913 | 		put_ctx(event->ctx); | 
 | 2914 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2915 | 	call_rcu(&event->rcu_head, free_event_rcu); | 
 | 2916 | } | 
 | 2917 |  | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 2918 | int perf_event_release_kernel(struct perf_event *event) | 
 | 2919 | { | 
 | 2920 | 	struct perf_event_context *ctx = event->ctx; | 
 | 2921 |  | 
| Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2922 | 	/* | 
 | 2923 | 	 * Remove it from the PMU; it can't get re-enabled since we got | 
 | 2924 | 	 * here only because the last reference went away. | 
 | 2925 | 	 */ | 
 | 2926 | 	perf_event_disable(event); | 
 | 2927 |  | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 2928 | 	WARN_ON_ONCE(ctx->parent_ctx); | 
| Peter Zijlstra | a0507c8 | 2010-05-06 15:42:53 +0200 | [diff] [blame] | 2929 | 	/* | 
 | 2930 | 	 * There are two ways this annotation is useful: | 
 | 2931 | 	 * | 
 | 2932 | 	 *  1) there is a lock recursion from perf_event_exit_task(); | 
 | 2933 | 	 *     see the comment there. | 
 | 2934 | 	 * | 
 | 2935 | 	 *  2) there is a lock-inversion with mmap_sem through | 
 | 2936 | 	 *     perf_event_read_group(), which takes faults while | 
 | 2937 | 	 *     holding ctx->mutex; however, this is called after | 
 | 2938 | 	 *     the last filedesc died, so there is no possibility | 
 | 2939 | 	 *     to trigger the AB-BA case. | 
 | 2940 | 	 */ | 
 | 2941 | 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); | 
| Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2942 | 	raw_spin_lock_irq(&ctx->lock); | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2943 | 	perf_group_detach(event); | 
| Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2944 | 	list_del_event(event, ctx); | 
| Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2945 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 2946 | 	mutex_unlock(&ctx->mutex); | 
 | 2947 |  | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 2948 | 	free_event(event); | 
 | 2949 |  | 
 | 2950 | 	return 0; | 
 | 2951 | } | 
 | 2952 | EXPORT_SYMBOL_GPL(perf_event_release_kernel); | 
 | 2953 |  | 
| Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 2954 | /* | 
 | 2955 |  * Called when the last reference to the file is gone. | 
 | 2956 |  */ | 
 | 2957 | static int perf_release(struct inode *inode, struct file *file) | 
 | 2958 | { | 
 | 2959 | 	struct perf_event *event = file->private_data; | 
| Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 2960 | 	struct task_struct *owner; | 
| Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 2961 |  | 
 | 2962 | 	file->private_data = NULL; | 
 | 2963 |  | 
| Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 2964 | 	rcu_read_lock(); | 
 | 2965 | 	owner = ACCESS_ONCE(event->owner); | 
 | 2966 | 	/* | 
 | 2967 | 	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe | 
 | 2968 | 	 * !owner it means the list deletion is complete and we can indeed | 
 | 2969 | 	 * free this event, otherwise we need to serialize on | 
 | 2970 | 	 * owner->perf_event_mutex. | 
 | 2971 | 	 */ | 
 | 2972 | 	smp_read_barrier_depends(); | 
 | 2973 | 	if (owner) { | 
 | 2974 | 		/* | 
 | 2975 | 		 * Since delayed_put_task_struct() also drops the last | 
 | 2976 | 		 * task reference we can safely take a new reference | 
 | 2977 | 		 * while holding the rcu_read_lock(). | 
 | 2978 | 		 */ | 
 | 2979 | 		get_task_struct(owner); | 
 | 2980 | 	} | 
 | 2981 | 	rcu_read_unlock(); | 
 | 2982 |  | 
 | 2983 | 	if (owner) { | 
 | 2984 | 		mutex_lock(&owner->perf_event_mutex); | 
 | 2985 | 		/* | 
 | 2986 | 		 * We have to re-check the event->owner field: if it is cleared, | 
 | 2987 | 		 * we raced with perf_event_exit_task(); acquiring the mutex | 
 | 2988 | 		 * ensures they're done, and we can proceed with freeing the | 
 | 2989 | 		 * event. | 
 | 2990 | 		 */ | 
 | 2991 | 		if (event->owner) | 
 | 2992 | 			list_del_init(&event->owner_entry); | 
 | 2993 | 		mutex_unlock(&owner->perf_event_mutex); | 
 | 2994 | 		put_task_struct(owner); | 
 | 2995 | 	} | 
 | 2996 |  | 
| Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 2997 | 	return perf_event_release_kernel(event); | 
 | 2998 | } | 
 | 2999 |  | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3000 | u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3001 | { | 
 | 3002 | 	struct perf_event *child; | 
 | 3003 | 	u64 total = 0; | 
 | 3004 |  | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3005 | 	*enabled = 0; | 
 | 3006 | 	*running = 0; | 
 | 3007 |  | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3008 | 	mutex_lock(&event->child_mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3009 | 	total += perf_event_read(event); | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3010 | 	*enabled += event->total_time_enabled + | 
 | 3011 | 			atomic64_read(&event->child_total_time_enabled); | 
 | 3012 | 	*running += event->total_time_running + | 
 | 3013 | 			atomic64_read(&event->child_total_time_running); | 
 | 3014 |  | 
 | 3015 | 	list_for_each_entry(child, &event->child_list, child_list) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3016 | 		total += perf_event_read(child); | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3017 | 		*enabled += child->total_time_enabled; | 
 | 3018 | 		*running += child->total_time_running; | 
 | 3019 | 	} | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3020 | 	mutex_unlock(&event->child_mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3021 |  | 
 | 3022 | 	return total; | 
 | 3023 | } | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 3024 | EXPORT_SYMBOL_GPL(perf_event_read_value); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3025 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3026 | static int perf_event_read_group(struct perf_event *event, | 
 | 3027 | 				   u64 read_format, char __user *buf) | 
 | 3028 | { | 
 | 3029 | 	struct perf_event *leader = event->group_leader, *sub; | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3030 | 	int n = 0, size = 0, ret = -EFAULT; | 
 | 3031 | 	struct perf_event_context *ctx = leader->ctx; | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3032 | 	u64 values[5]; | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3033 | 	u64 count, enabled, running; | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3034 |  | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3035 | 	mutex_lock(&ctx->mutex); | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3036 | 	count = perf_event_read_value(leader, &enabled, &running); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3037 |  | 
 | 3038 | 	values[n++] = 1 + leader->nr_siblings; | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3039 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 
 | 3040 | 		values[n++] = enabled; | 
 | 3041 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 
 | 3042 | 		values[n++] = running; | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3043 | 	values[n++] = count; | 
 | 3044 | 	if (read_format & PERF_FORMAT_ID) | 
 | 3045 | 		values[n++] = primary_event_id(leader); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3046 |  | 
 | 3047 | 	size = n * sizeof(u64); | 
 | 3048 |  | 
 | 3049 | 	if (copy_to_user(buf, values, size)) | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3050 | 		goto unlock; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3051 |  | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3052 | 	ret = size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3053 |  | 
 | 3054 | 	list_for_each_entry(sub, &leader->sibling_list, group_entry) { | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3055 | 		n = 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3056 |  | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3057 | 		values[n++] = perf_event_read_value(sub, &enabled, &running); | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3058 | 		if (read_format & PERF_FORMAT_ID) | 
 | 3059 | 			values[n++] = primary_event_id(sub); | 
 | 3060 |  | 
 | 3061 | 		size = n * sizeof(u64); | 
 | 3062 |  | 
| Stephane Eranian | 184d3da | 2009-11-23 21:40:49 -0800 | [diff] [blame] | 3063 | 		if (copy_to_user(buf + ret, values, size)) { | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3064 | 			ret = -EFAULT; | 
 | 3065 | 			goto unlock; | 
 | 3066 | 		} | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3067 |  | 
 | 3068 | 		ret += size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3069 | 	} | 
| Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 3070 | unlock: | 
 | 3071 | 	mutex_unlock(&ctx->mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3072 |  | 
| Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 3073 | 	return ret; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3074 | } | 
 | 3075 |  | 
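For reference, a minimal user-space sketch (not part of this file) of the buffer layout that perf_event_read_group() above produces; the struct name and the two-sibling group are illustrative assumptions.

/*
 * User-space sketch: layout produced by perf_event_read_group() for a
 * leader with two siblings and read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID.
 * Field order follows the values[] array built above.
 */
#include <stdint.h>

struct read_group_2 {
	uint64_t nr;		/* 1 + leader->nr_siblings */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	struct {
		uint64_t value;
		uint64_t id;	/* PERF_FORMAT_ID */
	} cnt[3];		/* leader first, then each sibling */
};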
 | 3076 | static int perf_event_read_one(struct perf_event *event, | 
 | 3077 | 				 u64 read_format, char __user *buf) | 
 | 3078 | { | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3079 | 	u64 enabled, running; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3080 | 	u64 values[4]; | 
 | 3081 | 	int n = 0; | 
 | 3082 |  | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 3083 | 	values[n++] = perf_event_read_value(event, &enabled, &running); | 
 | 3084 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 
 | 3085 | 		values[n++] = enabled; | 
 | 3086 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 
 | 3087 | 		values[n++] = running; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3088 | 	if (read_format & PERF_FORMAT_ID) | 
 | 3089 | 		values[n++] = primary_event_id(event); | 
 | 3090 |  | 
 | 3091 | 	if (copy_to_user(buf, values, n * sizeof(u64))) | 
 | 3092 | 		return -EFAULT; | 
 | 3093 |  | 
 | 3094 | 	return n * sizeof(u64); | 
 | 3095 | } | 
 | 3096 |  | 
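Likewise, a hedged sketch of consuming the single-event layout built by perf_event_read_one() above; read_counter() is an illustrative helper, and the scaling step assumes the usual value * enabled / running correction for multiplexed counters.

/* User-space sketch: decode the buffer filled by perf_event_read_one(),
 * assuming read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID. */
#include <stdint.h>
#include <unistd.h>

struct read_one {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
	uint64_t id;
};

static int read_counter(int fd, uint64_t *scaled)
{
	struct read_one rd;

	if (read(fd, &rd, sizeof(rd)) != sizeof(rd))
		return -1;
	/* Correct for multiplexing: value * enabled / running. */
	*scaled = rd.time_running ?
		  rd.value * rd.time_enabled / rd.time_running : rd.value;
	return 0;
}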
 | 3097 | /* | 
 | 3098 |  * Read the performance event - simple non-blocking version for now | 
 | 3099 |  */ | 
 | 3100 | static ssize_t | 
 | 3101 | perf_read_hw(struct perf_event *event, char __user *buf, size_t count) | 
 | 3102 | { | 
 | 3103 | 	u64 read_format = event->attr.read_format; | 
 | 3104 | 	int ret; | 
 | 3105 |  | 
 | 3106 | 	/* | 
 | 3107 | 	 * Return end-of-file for a read on an event that is in | 
 | 3108 | 	 * error state (i.e. because it was pinned but it couldn't be | 
 | 3109 | 	 * scheduled on to the CPU at some point). | 
 | 3110 | 	 */ | 
 | 3111 | 	if (event->state == PERF_EVENT_STATE_ERROR) | 
 | 3112 | 		return 0; | 
 | 3113 |  | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 3114 | 	if (count < event->read_size) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3115 | 		return -ENOSPC; | 
 | 3116 |  | 
 | 3117 | 	WARN_ON_ONCE(event->ctx->parent_ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3118 | 	if (read_format & PERF_FORMAT_GROUP) | 
 | 3119 | 		ret = perf_event_read_group(event, read_format, buf); | 
 | 3120 | 	else | 
 | 3121 | 		ret = perf_event_read_one(event, read_format, buf); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3122 |  | 
 | 3123 | 	return ret; | 
 | 3124 | } | 
 | 3125 |  | 
 | 3126 | static ssize_t | 
 | 3127 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 
 | 3128 | { | 
 | 3129 | 	struct perf_event *event = file->private_data; | 
 | 3130 |  | 
 | 3131 | 	return perf_read_hw(event, buf, count); | 
 | 3132 | } | 
 | 3133 |  | 
 | 3134 | static unsigned int perf_poll(struct file *file, poll_table *wait) | 
 | 3135 | { | 
 | 3136 | 	struct perf_event *event = file->private_data; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3137 | 	struct perf_buffer *buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3138 | 	unsigned int events = POLL_HUP; | 
 | 3139 |  | 
 | 3140 | 	rcu_read_lock(); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3141 | 	buffer = rcu_dereference(event->buffer); | 
 | 3142 | 	if (buffer) | 
 | 3143 | 		events = atomic_xchg(&buffer->poll, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3144 | 	rcu_read_unlock(); | 
 | 3145 |  | 
 | 3146 | 	poll_wait(file, &event->waitq, wait); | 
 | 3147 |  | 
 | 3148 | 	return events; | 
 | 3149 | } | 
 | 3150 |  | 
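A hedged user-space sketch of the other side of perf_poll(): block on the event fd until the ring buffer has pending data (set by perf_output_wakeup() further down). wait_for_samples() is an illustrative helper name.

#include <poll.h>

/* Sketch: wait until perf_poll() above reports pending ring-buffer data. */
static int wait_for_samples(int perf_fd)
{
	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };

	return poll(&pfd, 1, -1);	/* -1: block indefinitely */
}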
 | 3151 | static void perf_event_reset(struct perf_event *event) | 
 | 3152 | { | 
 | 3153 | 	(void)perf_event_read(event); | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3154 | 	local64_set(&event->count, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3155 | 	perf_event_update_userpage(event); | 
 | 3156 | } | 
 | 3157 |  | 
 | 3158 | /* | 
 | 3159 |  * Holding the top-level event's child_mutex means that any | 
 | 3160 |  * descendant process that has inherited this event will block | 
 | 3161 |  * in sync_child_event if it goes to exit, thus satisfying the | 
 | 3162 |  * task existence requirements of perf_event_enable/disable. | 
 | 3163 |  */ | 
 | 3164 | static void perf_event_for_each_child(struct perf_event *event, | 
 | 3165 | 					void (*func)(struct perf_event *)) | 
 | 3166 | { | 
 | 3167 | 	struct perf_event *child; | 
 | 3168 |  | 
 | 3169 | 	WARN_ON_ONCE(event->ctx->parent_ctx); | 
 | 3170 | 	mutex_lock(&event->child_mutex); | 
 | 3171 | 	func(event); | 
 | 3172 | 	list_for_each_entry(child, &event->child_list, child_list) | 
 | 3173 | 		func(child); | 
 | 3174 | 	mutex_unlock(&event->child_mutex); | 
 | 3175 | } | 
 | 3176 |  | 
 | 3177 | static void perf_event_for_each(struct perf_event *event, | 
 | 3178 | 				  void (*func)(struct perf_event *)) | 
 | 3179 | { | 
 | 3180 | 	struct perf_event_context *ctx = event->ctx; | 
 | 3181 | 	struct perf_event *sibling; | 
 | 3182 |  | 
 | 3183 | 	WARN_ON_ONCE(ctx->parent_ctx); | 
 | 3184 | 	mutex_lock(&ctx->mutex); | 
 | 3185 | 	event = event->group_leader; | 
 | 3186 |  | 
 | 3187 | 	perf_event_for_each_child(event, func); | 
 | 3188 |  | 
 | 3189 | 	list_for_each_entry(sibling, &event->sibling_list, group_entry) | 
 | 3190 | 		perf_event_for_each_child(sibling, func); | 
 | 3191 | 	mutex_unlock(&ctx->mutex); | 
 | 3192 | } | 
 | 3193 |  | 
 | 3194 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 
 | 3195 | { | 
 | 3196 | 	struct perf_event_context *ctx = event->ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3197 | 	int ret = 0; | 
 | 3198 | 	u64 value; | 
 | 3199 |  | 
| Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 3200 | 	if (!is_sampling_event(event)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3201 | 		return -EINVAL; | 
 | 3202 |  | 
| John Blackwood | ad0cf34 | 2010-09-28 18:03:11 -0400 | [diff] [blame] | 3203 | 	if (copy_from_user(&value, arg, sizeof(value))) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3204 | 		return -EFAULT; | 
 | 3205 |  | 
 | 3206 | 	if (!value) | 
 | 3207 | 		return -EINVAL; | 
 | 3208 |  | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3209 | 	raw_spin_lock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3210 | 	if (event->attr.freq) { | 
 | 3211 | 		if (value > sysctl_perf_event_sample_rate) { | 
 | 3212 | 			ret = -EINVAL; | 
 | 3213 | 			goto unlock; | 
 | 3214 | 		} | 
 | 3215 |  | 
 | 3216 | 		event->attr.sample_freq = value; | 
 | 3217 | 	} else { | 
 | 3218 | 		event->attr.sample_period = value; | 
 | 3219 | 		event->hw.sample_period = value; | 
 | 3220 | 	} | 
 | 3221 | unlock: | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3222 | 	raw_spin_unlock_irq(&ctx->lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3223 |  | 
 | 3224 | 	return ret; | 
 | 3225 | } | 
 | 3226 |  | 
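A hedged user-space sketch of driving perf_event_period(): PERF_EVENT_IOC_PERIOD takes a pointer to a u64 (see the copy_from_user() above), interpreted as a frequency when attr.freq is set and as a sample period otherwise. set_period() is an illustrative helper.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Sketch: update the period/frequency of a running sampling event. */
static int set_period(int fd, uint64_t value)
{
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &value);
}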
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3227 | static const struct file_operations perf_fops; | 
 | 3228 |  | 
 | 3229 | static struct perf_event *perf_fget_light(int fd, int *fput_needed) | 
 | 3230 | { | 
 | 3231 | 	struct file *file; | 
 | 3232 |  | 
 | 3233 | 	file = fget_light(fd, fput_needed); | 
 | 3234 | 	if (!file) | 
 | 3235 | 		return ERR_PTR(-EBADF); | 
 | 3236 |  | 
 | 3237 | 	if (file->f_op != &perf_fops) { | 
 | 3238 | 		fput_light(file, *fput_needed); | 
 | 3239 | 		*fput_needed = 0; | 
 | 3240 | 		return ERR_PTR(-EBADF); | 
 | 3241 | 	} | 
 | 3242 |  | 
 | 3243 | 	return file->private_data; | 
 | 3244 | } | 
 | 3245 |  | 
 | 3246 | static int perf_event_set_output(struct perf_event *event, | 
 | 3247 | 				 struct perf_event *output_event); | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 3248 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3249 |  | 
 | 3250 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 
 | 3251 | { | 
 | 3252 | 	struct perf_event *event = file->private_data; | 
 | 3253 | 	void (*func)(struct perf_event *); | 
 | 3254 | 	u32 flags = arg; | 
 | 3255 |  | 
 | 3256 | 	switch (cmd) { | 
 | 3257 | 	case PERF_EVENT_IOC_ENABLE: | 
 | 3258 | 		func = perf_event_enable; | 
 | 3259 | 		break; | 
 | 3260 | 	case PERF_EVENT_IOC_DISABLE: | 
 | 3261 | 		func = perf_event_disable; | 
 | 3262 | 		break; | 
 | 3263 | 	case PERF_EVENT_IOC_RESET: | 
 | 3264 | 		func = perf_event_reset; | 
 | 3265 | 		break; | 
 | 3266 |  | 
 | 3267 | 	case PERF_EVENT_IOC_REFRESH: | 
 | 3268 | 		return perf_event_refresh(event, arg); | 
 | 3269 |  | 
 | 3270 | 	case PERF_EVENT_IOC_PERIOD: | 
 | 3271 | 		return perf_event_period(event, (u64 __user *)arg); | 
 | 3272 |  | 
 | 3273 | 	case PERF_EVENT_IOC_SET_OUTPUT: | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3274 | 	{ | 
 | 3275 | 		struct perf_event *output_event = NULL; | 
 | 3276 | 		int fput_needed = 0; | 
 | 3277 | 		int ret; | 
 | 3278 |  | 
 | 3279 | 		if (arg != -1) { | 
 | 3280 | 			output_event = perf_fget_light(arg, &fput_needed); | 
 | 3281 | 			if (IS_ERR(output_event)) | 
 | 3282 | 				return PTR_ERR(output_event); | 
 | 3283 | 		} | 
 | 3284 |  | 
 | 3285 | 		ret = perf_event_set_output(event, output_event); | 
 | 3286 | 		if (output_event) | 
 | 3287 | 			fput_light(output_event->filp, fput_needed); | 
 | 3288 |  | 
 | 3289 | 		return ret; | 
 | 3290 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3291 |  | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 3292 | 	case PERF_EVENT_IOC_SET_FILTER: | 
 | 3293 | 		return perf_event_set_filter(event, (void __user *)arg); | 
 | 3294 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3295 | 	default: | 
 | 3296 | 		return -ENOTTY; | 
 | 3297 | 	} | 
 | 3298 |  | 
 | 3299 | 	if (flags & PERF_IOC_FLAG_GROUP) | 
 | 3300 | 		perf_event_for_each(event, func); | 
 | 3301 | 	else | 
 | 3302 | 		perf_event_for_each_child(event, func); | 
 | 3303 |  | 
 | 3304 | 	return 0; | 
 | 3305 | } | 
 | 3306 |  | 
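A hedged sketch of the ioctl dispatch above as seen from user space: PERF_IOC_FLAG_GROUP makes ENABLE/DISABLE/RESET walk the whole group via perf_event_for_each(), and PERF_EVENT_IOC_SET_OUTPUT redirects records into another event's buffer (passing -1 undoes it). control_group() and the two descriptors are illustrative.

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Sketch: group-wide control plus output redirection. */
static void control_group(int leader_fd, int member_fd)
{
	/* Applied to the leader and every sibling. */
	ioctl(leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

	/* Send member_fd's records into leader_fd's ring buffer. */
	ioctl(member_fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
}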
 | 3307 | int perf_event_task_enable(void) | 
 | 3308 | { | 
 | 3309 | 	struct perf_event *event; | 
 | 3310 |  | 
 | 3311 | 	mutex_lock(&current->perf_event_mutex); | 
 | 3312 | 	list_for_each_entry(event, &current->perf_event_list, owner_entry) | 
 | 3313 | 		perf_event_for_each_child(event, perf_event_enable); | 
 | 3314 | 	mutex_unlock(&current->perf_event_mutex); | 
 | 3315 |  | 
 | 3316 | 	return 0; | 
 | 3317 | } | 
 | 3318 |  | 
 | 3319 | int perf_event_task_disable(void) | 
 | 3320 | { | 
 | 3321 | 	struct perf_event *event; | 
 | 3322 |  | 
 | 3323 | 	mutex_lock(&current->perf_event_mutex); | 
 | 3324 | 	list_for_each_entry(event, &current->perf_event_list, owner_entry) | 
 | 3325 | 		perf_event_for_each_child(event, perf_event_disable); | 
 | 3326 | 	mutex_unlock(&current->perf_event_mutex); | 
 | 3327 |  | 
 | 3328 | 	return 0; | 
 | 3329 | } | 
 | 3330 |  | 
 | 3331 | #ifndef PERF_EVENT_INDEX_OFFSET | 
 | 3332 | # define PERF_EVENT_INDEX_OFFSET 0 | 
 | 3333 | #endif | 
 | 3334 |  | 
 | 3335 | static int perf_event_index(struct perf_event *event) | 
 | 3336 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 3337 | 	if (event->hw.state & PERF_HES_STOPPED) | 
 | 3338 | 		return 0; | 
 | 3339 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3340 | 	if (event->state != PERF_EVENT_STATE_ACTIVE) | 
 | 3341 | 		return 0; | 
 | 3342 |  | 
 | 3343 | 	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; | 
 | 3344 | } | 
 | 3345 |  | 
 | 3346 | /* | 
 | 3347 |  * Callers need to ensure there can be no nesting of this function, otherwise | 
 | 3348 |  * the seqlock logic goes bad. We can not serialize this because the arch | 
 | 3349 |  * code calls this from NMI context. | 
 | 3350 |  */ | 
 | 3351 | void perf_event_update_userpage(struct perf_event *event) | 
 | 3352 | { | 
 | 3353 | 	struct perf_event_mmap_page *userpg; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3354 | 	struct perf_buffer *buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3355 |  | 
 | 3356 | 	rcu_read_lock(); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3357 | 	buffer = rcu_dereference(event->buffer); | 
 | 3358 | 	if (!buffer) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3359 | 		goto unlock; | 
 | 3360 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3361 | 	userpg = buffer->user_page; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3362 |  | 
 | 3363 | 	/* | 
 | 3364 | 	 * Disable preemption so as to not let the corresponding user-space | 
 | 3365 | 	 * spin too long if we get preempted. | 
 | 3366 | 	 */ | 
 | 3367 | 	preempt_disable(); | 
 | 3368 | 	++userpg->lock; | 
 | 3369 | 	barrier(); | 
 | 3370 | 	userpg->index = perf_event_index(event); | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 3371 | 	userpg->offset = perf_event_count(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3372 | 	if (event->state == PERF_EVENT_STATE_ACTIVE) | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3373 | 		userpg->offset -= local64_read(&event->hw.prev_count); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3374 |  | 
 | 3375 | 	userpg->time_enabled = event->total_time_enabled + | 
 | 3376 | 			atomic64_read(&event->child_total_time_enabled); | 
 | 3377 |  | 
 | 3378 | 	userpg->time_running = event->total_time_running + | 
 | 3379 | 			atomic64_read(&event->child_total_time_running); | 
 | 3380 |  | 
 | 3381 | 	barrier(); | 
 | 3382 | 	++userpg->lock; | 
 | 3383 | 	preempt_enable(); | 
 | 3384 | unlock: | 
 | 3385 | 	rcu_read_unlock(); | 
 | 3386 | } | 
 | 3387 |  | 
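A hedged user-space sketch of the reader side of the lock/unlock protocol in perf_event_update_userpage() above: retry until the lock counter is unchanged across the reads. read_user_page() is an illustrative helper and __sync_synchronize() merely stands in for a proper read barrier.

#include <stdint.h>
#include <linux/perf_event.h>

#define rmb()	__sync_synchronize()	/* stand-in for a read barrier */

/* Sketch: snapshot the fields published by perf_event_update_userpage(). */
static void read_user_page(volatile struct perf_event_mmap_page *pc,
			   int64_t *offset, uint64_t *enabled,
			   uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		rmb();
		*offset  = pc->offset;
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		rmb();
	} while (pc->lock != seq);
}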
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3388 | static unsigned long perf_data_size(struct perf_buffer *buffer); | 
 | 3389 |  | 
 | 3390 | static void | 
 | 3391 | perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) | 
 | 3392 | { | 
 | 3393 | 	long max_size = perf_data_size(buffer); | 
 | 3394 |  | 
 | 3395 | 	if (watermark) | 
 | 3396 | 		buffer->watermark = min(max_size, watermark); | 
 | 3397 |  | 
 | 3398 | 	if (!buffer->watermark) | 
 | 3399 | 		buffer->watermark = max_size / 2; | 
 | 3400 |  | 
 | 3401 | 	if (flags & PERF_BUFFER_WRITABLE) | 
 | 3402 | 		buffer->writable = 1; | 
 | 3403 |  | 
 | 3404 | 	atomic_set(&buffer->refcount, 1); | 
 | 3405 | } | 
 | 3406 |  | 
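The watermark handed to perf_buffer_init() ultimately comes from the event attributes; a hedged sketch of requesting a byte-based wakeup watermark instead of the half-buffer default (setup_watermark() and the 4 KiB figure are illustrative).

#include <string.h>
#include <linux/perf_event.h>

/* Sketch: ask for a wakeup roughly every 4 KiB of ring-buffer data. */
static void setup_watermark(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->watermark = 1;		/* wakeup_watermark is in bytes */
	attr->wakeup_watermark = 4096;
}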
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3407 | #ifndef CONFIG_PERF_USE_VMALLOC | 
 | 3408 |  | 
 | 3409 | /* | 
 | 3410 |  * Back perf_mmap() with regular GFP_KERNEL-0 pages. | 
 | 3411 |  */ | 
 | 3412 |  | 
 | 3413 | static struct page * | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3414 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3415 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3416 | 	if (pgoff > buffer->nr_pages) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3417 | 		return NULL; | 
 | 3418 |  | 
 | 3419 | 	if (pgoff == 0) | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3420 | 		return virt_to_page(buffer->user_page); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3421 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3422 | 	return virt_to_page(buffer->data_pages[pgoff - 1]); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3423 | } | 
 | 3424 |  | 
| Peter Zijlstra | a19d35c | 2010-05-17 18:48:00 +0200 | [diff] [blame] | 3425 | static void *perf_mmap_alloc_page(int cpu) | 
 | 3426 | { | 
 | 3427 | 	struct page *page; | 
 | 3428 | 	int node; | 
 | 3429 |  | 
 | 3430 | 	node = (cpu == -1) ? cpu : cpu_to_node(cpu); | 
 | 3431 | 	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); | 
 | 3432 | 	if (!page) | 
 | 3433 | 		return NULL; | 
 | 3434 |  | 
 | 3435 | 	return page_address(page); | 
 | 3436 | } | 
 | 3437 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3438 | static struct perf_buffer * | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3439 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3440 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3441 | 	struct perf_buffer *buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3442 | 	unsigned long size; | 
 | 3443 | 	int i; | 
 | 3444 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3445 | 	size = sizeof(struct perf_buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3446 | 	size += nr_pages * sizeof(void *); | 
 | 3447 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3448 | 	buffer = kzalloc(size, GFP_KERNEL); | 
 | 3449 | 	if (!buffer) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3450 | 		goto fail; | 
 | 3451 |  | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3452 | 	buffer->user_page = perf_mmap_alloc_page(cpu); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3453 | 	if (!buffer->user_page) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3454 | 		goto fail_user_page; | 
 | 3455 |  | 
 | 3456 | 	for (i = 0; i < nr_pages; i++) { | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3457 | 		buffer->data_pages[i] = perf_mmap_alloc_page(cpu); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3458 | 		if (!buffer->data_pages[i]) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3459 | 			goto fail_data_pages; | 
 | 3460 | 	} | 
 | 3461 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3462 | 	buffer->nr_pages = nr_pages; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3463 |  | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3464 | 	perf_buffer_init(buffer, watermark, flags); | 
 | 3465 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3466 | 	return buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3467 |  | 
 | 3468 | fail_data_pages: | 
 | 3469 | 	for (i--; i >= 0; i--) | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3470 | 		free_page((unsigned long)buffer->data_pages[i]); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3471 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3472 | 	free_page((unsigned long)buffer->user_page); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3473 |  | 
 | 3474 | fail_user_page: | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3475 | 	kfree(buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3476 |  | 
 | 3477 | fail: | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3478 | 	return NULL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3479 | } | 
 | 3480 |  | 
 | 3481 | static void perf_mmap_free_page(unsigned long addr) | 
 | 3482 | { | 
 | 3483 | 	struct page *page = virt_to_page((void *)addr); | 
 | 3484 |  | 
 | 3485 | 	page->mapping = NULL; | 
 | 3486 | 	__free_page(page); | 
 | 3487 | } | 
 | 3488 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3489 | static void perf_buffer_free(struct perf_buffer *buffer) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3490 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3491 | 	int i; | 
 | 3492 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3493 | 	perf_mmap_free_page((unsigned long)buffer->user_page); | 
 | 3494 | 	for (i = 0; i < buffer->nr_pages; i++) | 
 | 3495 | 		perf_mmap_free_page((unsigned long)buffer->data_pages[i]); | 
 | 3496 | 	kfree(buffer); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3497 | } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3498 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3499 | static inline int page_order(struct perf_buffer *buffer) | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 3500 | { | 
 | 3501 | 	return 0; | 
 | 3502 | } | 
 | 3503 |  | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3504 | #else | 
 | 3505 |  | 
 | 3506 | /* | 
 | 3507 |  * Back perf_mmap() with vmalloc memory. | 
 | 3508 |  * | 
 | 3509 |  * Required for architectures that have d-cache aliasing issues. | 
 | 3510 |  */ | 
 | 3511 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3512 | static inline int page_order(struct perf_buffer *buffer) | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 3513 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3514 | 	return buffer->page_order; | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 3515 | } | 
 | 3516 |  | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3517 | static struct page * | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3518 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3519 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3520 | 	if (pgoff > (1UL << page_order(buffer))) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3521 | 		return NULL; | 
 | 3522 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3523 | 	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3524 | } | 
 | 3525 |  | 
 | 3526 | static void perf_mmap_unmark_page(void *addr) | 
 | 3527 | { | 
 | 3528 | 	struct page *page = vmalloc_to_page(addr); | 
 | 3529 |  | 
 | 3530 | 	page->mapping = NULL; | 
 | 3531 | } | 
 | 3532 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3533 | static void perf_buffer_free_work(struct work_struct *work) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3534 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3535 | 	struct perf_buffer *buffer; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3536 | 	void *base; | 
 | 3537 | 	int i, nr; | 
 | 3538 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3539 | 	buffer = container_of(work, struct perf_buffer, work); | 
 | 3540 | 	nr = 1 << page_order(buffer); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3541 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3542 | 	base = buffer->user_page; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3543 | 	for (i = 0; i < nr + 1; i++) | 
 | 3544 | 		perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | 
 | 3545 |  | 
 | 3546 | 	vfree(base); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3547 | 	kfree(buffer); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3548 | } | 
 | 3549 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3550 | static void perf_buffer_free(struct perf_buffer *buffer) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3551 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3552 | 	schedule_work(&buffer->work); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3553 | } | 
 | 3554 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3555 | static struct perf_buffer * | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3556 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3557 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3558 | 	struct perf_buffer *buffer; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3559 | 	unsigned long size; | 
 | 3560 | 	void *all_buf; | 
 | 3561 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3562 | 	size = sizeof(struct perf_buffer); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3563 | 	size += sizeof(void *); | 
 | 3564 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3565 | 	buffer = kzalloc(size, GFP_KERNEL); | 
 | 3566 | 	if (!buffer) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3567 | 		goto fail; | 
 | 3568 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3569 | 	INIT_WORK(&buffer->work, perf_buffer_free_work); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3570 |  | 
 | 3571 | 	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | 
 | 3572 | 	if (!all_buf) | 
 | 3573 | 		goto fail_all_buf; | 
 | 3574 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3575 | 	buffer->user_page = all_buf; | 
 | 3576 | 	buffer->data_pages[0] = all_buf + PAGE_SIZE; | 
 | 3577 | 	buffer->page_order = ilog2(nr_pages); | 
 | 3578 | 	buffer->nr_pages = 1; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3579 |  | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3580 | 	perf_buffer_init(buffer, watermark, flags); | 
 | 3581 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3582 | 	return buffer; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3583 |  | 
 | 3584 | fail_all_buf: | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3585 | 	kfree(buffer); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3586 |  | 
 | 3587 | fail: | 
 | 3588 | 	return NULL; | 
 | 3589 | } | 
 | 3590 |  | 
 | 3591 | #endif | 
 | 3592 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3593 | static unsigned long perf_data_size(struct perf_buffer *buffer) | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 3594 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3595 | 	return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 3596 | } | 
 | 3597 |  | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3598 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
 | 3599 | { | 
 | 3600 | 	struct perf_event *event = vma->vm_file->private_data; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3601 | 	struct perf_buffer *buffer; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3602 | 	int ret = VM_FAULT_SIGBUS; | 
 | 3603 |  | 
 | 3604 | 	if (vmf->flags & FAULT_FLAG_MKWRITE) { | 
 | 3605 | 		if (vmf->pgoff == 0) | 
 | 3606 | 			ret = 0; | 
 | 3607 | 		return ret; | 
 | 3608 | 	} | 
 | 3609 |  | 
 | 3610 | 	rcu_read_lock(); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3611 | 	buffer = rcu_dereference(event->buffer); | 
 | 3612 | 	if (!buffer) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3613 | 		goto unlock; | 
 | 3614 |  | 
 | 3615 | 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | 
 | 3616 | 		goto unlock; | 
 | 3617 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3618 | 	vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3619 | 	if (!vmf->page) | 
 | 3620 | 		goto unlock; | 
 | 3621 |  | 
 | 3622 | 	get_page(vmf->page); | 
 | 3623 | 	vmf->page->mapping = vma->vm_file->f_mapping; | 
 | 3624 | 	vmf->page->index   = vmf->pgoff; | 
 | 3625 |  | 
 | 3626 | 	ret = 0; | 
 | 3627 | unlock: | 
 | 3628 | 	rcu_read_unlock(); | 
 | 3629 |  | 
 | 3630 | 	return ret; | 
 | 3631 | } | 
 | 3632 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3633 | static void perf_buffer_free_rcu(struct rcu_head *rcu_head) | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3634 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3635 | 	struct perf_buffer *buffer; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3636 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3637 | 	buffer = container_of(rcu_head, struct perf_buffer, rcu_head); | 
 | 3638 | 	perf_buffer_free(buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3639 | } | 
 | 3640 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3641 | static struct perf_buffer *perf_buffer_get(struct perf_event *event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3642 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3643 | 	struct perf_buffer *buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3644 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3645 | 	rcu_read_lock(); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3646 | 	buffer = rcu_dereference(event->buffer); | 
 | 3647 | 	if (buffer) { | 
 | 3648 | 		if (!atomic_inc_not_zero(&buffer->refcount)) | 
 | 3649 | 			buffer = NULL; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3650 | 	} | 
 | 3651 | 	rcu_read_unlock(); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3652 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3653 | 	return buffer; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3654 | } | 
 | 3655 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3656 | static void perf_buffer_put(struct perf_buffer *buffer) | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3657 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3658 | 	if (!atomic_dec_and_test(&buffer->refcount)) | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3659 | 		return; | 
 | 3660 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3661 | 	call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3662 | } | 
 | 3663 |  | 
 | 3664 | static void perf_mmap_open(struct vm_area_struct *vma) | 
 | 3665 | { | 
 | 3666 | 	struct perf_event *event = vma->vm_file->private_data; | 
 | 3667 |  | 
 | 3668 | 	atomic_inc(&event->mmap_count); | 
 | 3669 | } | 
 | 3670 |  | 
 | 3671 | static void perf_mmap_close(struct vm_area_struct *vma) | 
 | 3672 | { | 
 | 3673 | 	struct perf_event *event = vma->vm_file->private_data; | 
 | 3674 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3675 | 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3676 | 		unsigned long size = perf_data_size(event->buffer); | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3677 | 		struct user_struct *user = event->mmap_user; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3678 | 		struct perf_buffer *buffer = event->buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3679 |  | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3680 | 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3681 | 		vma->vm_mm->locked_vm -= event->mmap_locked; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3682 | 		rcu_assign_pointer(event->buffer, NULL); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3683 | 		mutex_unlock(&event->mmap_mutex); | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3684 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3685 | 		perf_buffer_put(buffer); | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3686 | 		free_uid(user); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3687 | 	} | 
 | 3688 | } | 
 | 3689 |  | 
| Alexey Dobriyan | f0f37e2 | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 3690 | static const struct vm_operations_struct perf_mmap_vmops = { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3691 | 	.open		= perf_mmap_open, | 
 | 3692 | 	.close		= perf_mmap_close, | 
 | 3693 | 	.fault		= perf_mmap_fault, | 
 | 3694 | 	.page_mkwrite	= perf_mmap_fault, | 
 | 3695 | }; | 
 | 3696 |  | 
 | 3697 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) | 
 | 3698 | { | 
 | 3699 | 	struct perf_event *event = file->private_data; | 
 | 3700 | 	unsigned long user_locked, user_lock_limit; | 
 | 3701 | 	struct user_struct *user = current_user(); | 
 | 3702 | 	unsigned long locked, lock_limit; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3703 | 	struct perf_buffer *buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3704 | 	unsigned long vma_size; | 
 | 3705 | 	unsigned long nr_pages; | 
 | 3706 | 	long user_extra, extra; | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3707 | 	int ret = 0, flags = 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3708 |  | 
| Peter Zijlstra | c792061 | 2010-05-18 10:33:24 +0200 | [diff] [blame] | 3709 | 	/* | 
 | 3710 | 	 * Don't allow mmap() of inherited per-task counters. This would | 
 | 3711 | 	 * create a performance issue due to all children writing to the | 
 | 3712 | 	 * same buffer. | 
 | 3713 | 	 */ | 
 | 3714 | 	if (event->cpu == -1 && event->attr.inherit) | 
 | 3715 | 		return -EINVAL; | 
 | 3716 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3717 | 	if (!(vma->vm_flags & VM_SHARED)) | 
 | 3718 | 		return -EINVAL; | 
 | 3719 |  | 
 | 3720 | 	vma_size = vma->vm_end - vma->vm_start; | 
 | 3721 | 	nr_pages = (vma_size / PAGE_SIZE) - 1; | 
 | 3722 |  | 
 | 3723 | 	/* | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3724 | 	 * If we have buffer pages ensure they're a power-of-two number, so we | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3725 | 	 * can do bitmasks instead of modulo. | 
 | 3726 | 	 */ | 
 | 3727 | 	if (nr_pages != 0 && !is_power_of_2(nr_pages)) | 
 | 3728 | 		return -EINVAL; | 
 | 3729 |  | 
 | 3730 | 	if (vma_size != PAGE_SIZE * (1 + nr_pages)) | 
 | 3731 | 		return -EINVAL; | 
 | 3732 |  | 
 | 3733 | 	if (vma->vm_pgoff != 0) | 
 | 3734 | 		return -EINVAL; | 
 | 3735 |  | 
 | 3736 | 	WARN_ON_ONCE(event->ctx->parent_ctx); | 
 | 3737 | 	mutex_lock(&event->mmap_mutex); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3738 | 	if (event->buffer) { | 
 | 3739 | 		if (event->buffer->nr_pages == nr_pages) | 
 | 3740 | 			atomic_inc(&event->buffer->refcount); | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3741 | 		else | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3742 | 			ret = -EINVAL; | 
 | 3743 | 		goto unlock; | 
 | 3744 | 	} | 
 | 3745 |  | 
 | 3746 | 	user_extra = nr_pages + 1; | 
 | 3747 | 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); | 
 | 3748 |  | 
 | 3749 | 	/* | 
 | 3750 | 	 * Increase the limit linearly with more CPUs: | 
 | 3751 | 	 */ | 
 | 3752 | 	user_lock_limit *= num_online_cpus(); | 
 | 3753 |  | 
 | 3754 | 	user_locked = atomic_long_read(&user->locked_vm) + user_extra; | 
 | 3755 |  | 
 | 3756 | 	extra = 0; | 
 | 3757 | 	if (user_locked > user_lock_limit) | 
 | 3758 | 		extra = user_locked - user_lock_limit; | 
 | 3759 |  | 
| Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 3760 | 	lock_limit = rlimit(RLIMIT_MEMLOCK); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3761 | 	lock_limit >>= PAGE_SHIFT; | 
 | 3762 | 	locked = vma->vm_mm->locked_vm + extra; | 
 | 3763 |  | 
 | 3764 | 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && | 
 | 3765 | 		!capable(CAP_IPC_LOCK)) { | 
 | 3766 | 		ret = -EPERM; | 
 | 3767 | 		goto unlock; | 
 | 3768 | 	} | 
 | 3769 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3770 | 	WARN_ON(event->buffer); | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 3771 |  | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3772 | 	if (vma->vm_flags & VM_WRITE) | 
 | 3773 | 		flags |= PERF_BUFFER_WRITABLE; | 
 | 3774 |  | 
 | 3775 | 	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, | 
 | 3776 | 				   event->cpu, flags); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3777 | 	if (!buffer) { | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3778 | 		ret = -ENOMEM; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3779 | 		goto unlock; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3780 | 	} | 
| Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 3781 | 	rcu_assign_pointer(event->buffer, buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3782 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3783 | 	atomic_long_add(user_extra, &user->locked_vm); | 
 | 3784 | 	event->mmap_locked = extra; | 
 | 3785 | 	event->mmap_user = get_current_user(); | 
 | 3786 | 	vma->vm_mm->locked_vm += event->mmap_locked; | 
 | 3787 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3788 | unlock: | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 3789 | 	if (!ret) | 
 | 3790 | 		atomic_inc(&event->mmap_count); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3791 | 	mutex_unlock(&event->mmap_mutex); | 
 | 3792 |  | 
 | 3793 | 	vma->vm_flags |= VM_RESERVED; | 
 | 3794 | 	vma->vm_ops = &perf_mmap_vmops; | 
 | 3795 |  | 
 | 3796 | 	return ret; | 
 | 3797 | } | 
 | 3798 |  | 
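A hedged user-space sketch matching the size checks in perf_mmap() above: the mapping must cover the metadata page plus a power-of-two number of data pages, at offset 0 and MAP_SHARED. map_ring() is an illustrative helper.

#include <sys/mman.h>
#include <unistd.h>

/* Sketch: map 1 metadata page + a power-of-two number of data pages. */
static void *map_ring(int fd, unsigned int data_pages)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	return mmap(NULL, (1 + data_pages) * page,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}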
 | 3799 | static int perf_fasync(int fd, struct file *filp, int on) | 
 | 3800 | { | 
 | 3801 | 	struct inode *inode = filp->f_path.dentry->d_inode; | 
 | 3802 | 	struct perf_event *event = filp->private_data; | 
 | 3803 | 	int retval; | 
 | 3804 |  | 
 | 3805 | 	mutex_lock(&inode->i_mutex); | 
 | 3806 | 	retval = fasync_helper(fd, filp, on, &event->fasync); | 
 | 3807 | 	mutex_unlock(&inode->i_mutex); | 
 | 3808 |  | 
 | 3809 | 	if (retval < 0) | 
 | 3810 | 		return retval; | 
 | 3811 |  | 
 | 3812 | 	return 0; | 
 | 3813 | } | 
 | 3814 |  | 
 | 3815 | static const struct file_operations perf_fops = { | 
| Arnd Bergmann | 3326c1c | 2010-03-23 19:09:33 +0100 | [diff] [blame] | 3816 | 	.llseek			= no_llseek, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3817 | 	.release		= perf_release, | 
 | 3818 | 	.read			= perf_read, | 
 | 3819 | 	.poll			= perf_poll, | 
 | 3820 | 	.unlocked_ioctl		= perf_ioctl, | 
 | 3821 | 	.compat_ioctl		= perf_ioctl, | 
 | 3822 | 	.mmap			= perf_mmap, | 
 | 3823 | 	.fasync			= perf_fasync, | 
 | 3824 | }; | 
 | 3825 |  | 
 | 3826 | /* | 
 | 3827 |  * Perf event wakeup | 
 | 3828 |  * | 
 | 3829 |  * If there's data, ensure we set the poll() state and publish everything | 
 | 3830 |  * to user-space before waking everybody up. | 
 | 3831 |  */ | 
 | 3832 |  | 
 | 3833 | void perf_event_wakeup(struct perf_event *event) | 
 | 3834 | { | 
 | 3835 | 	wake_up_all(&event->waitq); | 
 | 3836 |  | 
 | 3837 | 	if (event->pending_kill) { | 
 | 3838 | 		kill_fasync(&event->fasync, SIGIO, event->pending_kill); | 
 | 3839 | 		event->pending_kill = 0; | 
 | 3840 | 	} | 
 | 3841 | } | 
 | 3842 |  | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 3843 | static void perf_pending_event(struct irq_work *entry) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3844 | { | 
 | 3845 | 	struct perf_event *event = container_of(entry, | 
 | 3846 | 			struct perf_event, pending); | 
 | 3847 |  | 
 | 3848 | 	if (event->pending_disable) { | 
 | 3849 | 		event->pending_disable = 0; | 
 | 3850 | 		__perf_event_disable(event); | 
 | 3851 | 	} | 
 | 3852 |  | 
 | 3853 | 	if (event->pending_wakeup) { | 
 | 3854 | 		event->pending_wakeup = 0; | 
 | 3855 | 		perf_event_wakeup(event); | 
 | 3856 | 	} | 
 | 3857 | } | 
 | 3858 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3859 | /* | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 3860 |  * We assume there is only KVM supporting the callbacks. | 
 | 3861 |  * Later on, we might change it to a list if there is | 
 | 3862 |  * another virtualization implementation supporting the callbacks. | 
 | 3863 |  */ | 
 | 3864 | struct perf_guest_info_callbacks *perf_guest_cbs; | 
 | 3865 |  | 
 | 3866 | int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) | 
 | 3867 | { | 
 | 3868 | 	perf_guest_cbs = cbs; | 
 | 3869 | 	return 0; | 
 | 3870 | } | 
 | 3871 | EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); | 
 | 3872 |  | 
 | 3873 | int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) | 
 | 3874 | { | 
 | 3875 | 	perf_guest_cbs = NULL; | 
 | 3876 | 	return 0; | 
 | 3877 | } | 
 | 3878 | EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); | 
 | 3879 |  | 
 | 3880 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3881 |  * Output | 
 | 3882 |  */ | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3883 | static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3884 | 			      unsigned long offset, unsigned long head) | 
 | 3885 | { | 
 | 3886 | 	unsigned long mask; | 
 | 3887 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3888 | 	if (!buffer->writable) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3889 | 		return true; | 
 | 3890 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3891 | 	mask = perf_data_size(buffer) - 1; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3892 |  | 
 | 3893 | 	offset = (offset - tail) & mask; | 
 | 3894 | 	head   = (head   - tail) & mask; | 
 | 3895 |  | 
 | 3896 | 	if ((int)(head - offset) < 0) | 
 | 3897 | 		return false; | 
 | 3898 |  | 
 | 3899 | 	return true; | 
 | 3900 | } | 
 | 3901 |  | 
 | 3902 | static void perf_output_wakeup(struct perf_output_handle *handle) | 
 | 3903 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3904 | 	atomic_set(&handle->buffer->poll, POLL_IN); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3905 |  | 
 | 3906 | 	if (handle->nmi) { | 
 | 3907 | 		handle->event->pending_wakeup = 1; | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 3908 | 		irq_work_queue(&handle->event->pending); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3909 | 	} else | 
 | 3910 | 		perf_event_wakeup(handle->event); | 
 | 3911 | } | 
 | 3912 |  | 
 | 3913 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3914 |  * We need to ensure a later event_id doesn't publish a head when a former | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3915 |  * event isn't done writing. However since we need to deal with NMIs we | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3916 |  * cannot fully serialize things. | 
 | 3917 |  * | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3918 |  * We only publish the head (and generate a wakeup) when the outer-most | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3919 |  * event completes. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3920 |  */ | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3921 | static void perf_output_get_handle(struct perf_output_handle *handle) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3922 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3923 | 	struct perf_buffer *buffer = handle->buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3924 |  | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3925 | 	preempt_disable(); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3926 | 	local_inc(&buffer->nest); | 
 | 3927 | 	handle->wakeup = local_read(&buffer->wakeup); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3928 | } | 
 | 3929 |  | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3930 | static void perf_output_put_handle(struct perf_output_handle *handle) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3931 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3932 | 	struct perf_buffer *buffer = handle->buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3933 | 	unsigned long head; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3934 |  | 
 | 3935 | again: | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3936 | 	head = local_read(&buffer->head); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3937 |  | 
 | 3938 | 	/* | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3939 | 	 * IRQ/NMI can happen here, which means we can miss a head update. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3940 | 	 */ | 
 | 3941 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3942 | 	if (!local_dec_and_test(&buffer->nest)) | 
| Frederic Weisbecker | acd35a4 | 2010-05-20 21:28:34 +0200 | [diff] [blame] | 3943 | 		goto out; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3944 |  | 
 | 3945 | 	/* | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3946 | 	 * Publish the known good head. Rely on the full barrier implied | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3947 | 	 * by atomic_dec_and_test() order the buffer->head read and this | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3948 | 	 * write. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3949 | 	 */ | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3950 | 	buffer->user_page->data_head = head; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3951 |  | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3952 | 	/* | 
 | 3953 | 	 * Now check if we missed an update, rely on the (compiler) | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3954 | 	 * barrier in atomic_dec_and_test() to re-read buffer->head. | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3955 | 	 */ | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3956 | 	if (unlikely(head != local_read(&buffer->head))) { | 
 | 3957 | 		local_inc(&buffer->nest); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3958 | 		goto again; | 
 | 3959 | 	} | 
 | 3960 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3961 | 	if (handle->wakeup != local_read(&buffer->wakeup)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3962 | 		perf_output_wakeup(handle); | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3963 |  | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 3964 | out: | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 3965 | 	preempt_enable(); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3966 | } | 
 | 3967 |  | 
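A hedged sketch of the user-space counterpart to perf_output_put_handle() above: read data_head, consume complete records, then publish data_tail (the mb() that the comment in perf_output_begin() below refers to). drain_ring() is illustrative, __sync_synchronize() stands in for real barriers, and records that wrap past the end of the buffer are not handled.

#include <stdint.h>
#include <unistd.h>
#include <linux/perf_event.h>

#define mb()	__sync_synchronize()	/* stand-in for a full barrier */

/* Sketch: consume records up to data_head, then advance data_tail. */
static void drain_ring(void *base, uint64_t data_size,
		       void (*handle)(struct perf_event_header *))
{
	volatile struct perf_event_mmap_page *pc = base;
	char *data = (char *)base + sysconf(_SC_PAGESIZE);
	uint64_t head = pc->data_head;
	uint64_t tail = pc->data_tail;

	mb();			/* read data_head before the records */
	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (data_size - 1)));

		handle(hdr);
		tail += hdr->size;
	}
	mb();			/* finish reading before freeing space */
	pc->data_tail = tail;
}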
| Peter Zijlstra | a94ffaa | 2010-05-20 19:50:07 +0200 | [diff] [blame] | 3968 | __always_inline void perf_output_copy(struct perf_output_handle *handle, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3969 | 		      const void *buf, unsigned int len) | 
 | 3970 | { | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 3971 | 	do { | 
| Peter Zijlstra | a94ffaa | 2010-05-20 19:50:07 +0200 | [diff] [blame] | 3972 | 		unsigned long size = min_t(unsigned long, handle->size, len); | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 3973 |  | 
 | 3974 | 		memcpy(handle->addr, buf, size); | 
 | 3975 |  | 
 | 3976 | 		len -= size; | 
 | 3977 | 		handle->addr += size; | 
| Frederic Weisbecker | 74048f8 | 2010-05-27 21:34:58 +0200 | [diff] [blame] | 3978 | 		buf += size; | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 3979 | 		handle->size -= size; | 
 | 3980 | 		if (!handle->size) { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3981 | 			struct perf_buffer *buffer = handle->buffer; | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 3982 |  | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 3983 | 			handle->page++; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 3984 | 			handle->page &= buffer->nr_pages - 1; | 
 | 3985 | 			handle->addr = buffer->data_pages[handle->page]; | 
 | 3986 | 			handle->size = PAGE_SIZE << page_order(buffer); | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 3987 | 		} | 
 | 3988 | 	} while (len); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3989 | } | 
 | 3990 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 3991 | static void __perf_event_header__init_id(struct perf_event_header *header, | 
 | 3992 | 					 struct perf_sample_data *data, | 
 | 3993 | 					 struct perf_event *event) | 
| Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 3994 | { | 
 | 3995 | 	u64 sample_type = event->attr.sample_type; | 
 | 3996 |  | 
 | 3997 | 	data->type = sample_type; | 
 | 3998 | 	header->size += event->id_header_size; | 
 | 3999 |  | 
 | 4000 | 	if (sample_type & PERF_SAMPLE_TID) { | 
 | 4001 | 		/* namespace issues */ | 
 | 4002 | 		data->tid_entry.pid = perf_event_pid(event, current); | 
 | 4003 | 		data->tid_entry.tid = perf_event_tid(event, current); | 
 | 4004 | 	} | 
 | 4005 |  | 
 | 4006 | 	if (sample_type & PERF_SAMPLE_TIME) | 
 | 4007 | 		data->time = perf_clock(); | 
 | 4008 |  | 
 | 4009 | 	if (sample_type & PERF_SAMPLE_ID) | 
 | 4010 | 		data->id = primary_event_id(event); | 
 | 4011 |  | 
 | 4012 | 	if (sample_type & PERF_SAMPLE_STREAM_ID) | 
 | 4013 | 		data->stream_id = event->id; | 
 | 4014 |  | 
 | 4015 | 	if (sample_type & PERF_SAMPLE_CPU) { | 
 | 4016 | 		data->cpu_entry.cpu	 = raw_smp_processor_id(); | 
 | 4017 | 		data->cpu_entry.reserved = 0; | 
 | 4018 | 	} | 
 | 4019 | } | 
 | 4020 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4021 | static void perf_event_header__init_id(struct perf_event_header *header, | 
 | 4022 | 				       struct perf_sample_data *data, | 
 | 4023 | 				       struct perf_event *event) | 
 | 4024 | { | 
 | 4025 | 	if (event->attr.sample_id_all) | 
 | 4026 | 		__perf_event_header__init_id(header, data, event); | 
 | 4027 | } | 
 | 4028 |  | 
 | 4029 | static void __perf_event__output_id_sample(struct perf_output_handle *handle, | 
 | 4030 | 					   struct perf_sample_data *data) | 
 | 4031 | { | 
 | 4032 | 	u64 sample_type = data->type; | 
 | 4033 |  | 
 | 4034 | 	if (sample_type & PERF_SAMPLE_TID) | 
 | 4035 | 		perf_output_put(handle, data->tid_entry); | 
 | 4036 |  | 
 | 4037 | 	if (sample_type & PERF_SAMPLE_TIME) | 
 | 4038 | 		perf_output_put(handle, data->time); | 
 | 4039 |  | 
 | 4040 | 	if (sample_type & PERF_SAMPLE_ID) | 
 | 4041 | 		perf_output_put(handle, data->id); | 
 | 4042 |  | 
 | 4043 | 	if (sample_type & PERF_SAMPLE_STREAM_ID) | 
 | 4044 | 		perf_output_put(handle, data->stream_id); | 
 | 4045 |  | 
 | 4046 | 	if (sample_type & PERF_SAMPLE_CPU) | 
 | 4047 | 		perf_output_put(handle, data->cpu_entry); | 
 | 4048 | } | 
 | 4049 |  | 
 | 4050 | static void perf_event__output_id_sample(struct perf_event *event, | 
 | 4051 | 					 struct perf_output_handle *handle, | 
 | 4052 | 					 struct perf_sample_data *sample) | 
 | 4053 | { | 
 | 4054 | 	if (event->attr.sample_id_all) | 
 | 4055 | 		__perf_event__output_id_sample(handle, sample); | 
 | 4056 | } | 
 | 4057 |  | 
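A hedged sketch of the trailer that __perf_event__output_id_sample() above appends to non-sample records when attr.sample_id_all is set, assuming sample_type includes TID | TIME | ID | STREAM_ID | CPU; the struct name is illustrative and the field order follows the function above.

#include <stdint.h>

/* Sketch: sample_id trailer layout, in the order written above. */
struct sample_id_trailer {
	uint32_t pid, tid;	/* PERF_SAMPLE_TID */
	uint64_t time;		/* PERF_SAMPLE_TIME */
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
	uint32_t cpu, res;	/* PERF_SAMPLE_CPU */
};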
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4058 | int perf_output_begin(struct perf_output_handle *handle, | 
 | 4059 | 		      struct perf_event *event, unsigned int size, | 
 | 4060 | 		      int nmi, int sample) | 
 | 4061 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4062 | 	struct perf_buffer *buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4063 | 	unsigned long tail, offset, head; | 
 | 4064 | 	int have_lost; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4065 | 	struct perf_sample_data sample_data; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4066 | 	struct { | 
 | 4067 | 		struct perf_event_header header; | 
 | 4068 | 		u64			 id; | 
 | 4069 | 		u64			 lost; | 
 | 4070 | 	} lost_event; | 
 | 4071 |  | 
 | 4072 | 	rcu_read_lock(); | 
 | 4073 | 	/* | 
 | 4074 | 	 * For inherited events we send all the output towards the parent. | 
 | 4075 | 	 */ | 
 | 4076 | 	if (event->parent) | 
 | 4077 | 		event = event->parent; | 
 | 4078 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4079 | 	buffer = rcu_dereference(event->buffer); | 
 | 4080 | 	if (!buffer) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4081 | 		goto out; | 
 | 4082 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4083 | 	handle->buffer	= buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4084 | 	handle->event	= event; | 
 | 4085 | 	handle->nmi	= nmi; | 
 | 4086 | 	handle->sample	= sample; | 
 | 4087 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4088 | 	if (!buffer->nr_pages) | 
| Stephane Eranian | 00d1d0b | 2010-05-17 12:46:01 +0200 | [diff] [blame] | 4089 | 		goto out; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4090 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4091 | 	have_lost = local_read(&buffer->lost); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4092 | 	if (have_lost) { | 
 | 4093 | 		lost_event.header.size = sizeof(lost_event); | 
 | 4094 | 		perf_event_header__init_id(&lost_event.header, &sample_data, | 
 | 4095 | 					   event); | 
 | 4096 | 		size += lost_event.header.size; | 
 | 4097 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4098 |  | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 4099 | 	perf_output_get_handle(handle); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4100 |  | 
 | 4101 | 	do { | 
 | 4102 | 		/* | 
 | 4103 | 		 * Userspace could choose to issue a mb() before updating the | 
 | 4104 | 		 * tail pointer, so that all reads will be completed before the | 
 | 4105 | 		 * write is issued. | 
 | 4106 | 		 */ | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4107 | 		tail = ACCESS_ONCE(buffer->user_page->data_tail); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4108 | 		smp_rmb(); | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4109 | 		offset = head = local_read(&buffer->head); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4110 | 		head += size; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4111 | 		if (unlikely(!perf_output_space(buffer, tail, offset, head))) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4112 | 			goto fail; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4113 | 	} while (local_cmpxchg(&buffer->head, offset, head) != offset); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4114 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4115 | 	if (head - local_read(&buffer->wakeup) > buffer->watermark) | 
 | 4116 | 		local_add(buffer->watermark, &buffer->wakeup); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4117 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4118 | 	handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); | 
 | 4119 | 	handle->page &= buffer->nr_pages - 1; | 
 | 4120 | 	handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); | 
 | 4121 | 	handle->addr = buffer->data_pages[handle->page]; | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 4122 | 	handle->addr += handle->size; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4123 | 	handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 4124 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4125 | 	if (have_lost) { | 
 | 4126 | 		lost_event.header.type = PERF_RECORD_LOST; | 
 | 4127 | 		lost_event.header.misc = 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4128 | 		lost_event.id          = event->id; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4129 | 		lost_event.lost        = local_xchg(&buffer->lost, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4130 |  | 
 | 4131 | 		perf_output_put(handle, lost_event); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4132 | 		perf_event__output_id_sample(event, handle, &sample_data); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4133 | 	} | 
 | 4134 |  | 
 | 4135 | 	return 0; | 
 | 4136 |  | 
 | 4137 | fail: | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4138 | 	local_inc(&buffer->lost); | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 4139 | 	perf_output_put_handle(handle); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4140 | out: | 
 | 4141 | 	rcu_read_unlock(); | 
 | 4142 |  | 
 | 4143 | 	return -ENOSPC; | 
 | 4144 | } | 
 | 4145 |  | 
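 |  | /* | 
 |  |  * Finish a record started with perf_output_begin(): account wakeup_events | 
 |  |  * for sampled records, then release the handle and the RCU read lock | 
 |  |  * taken by perf_output_begin(). | 
 |  |  */ | 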
 | 4146 | void perf_output_end(struct perf_output_handle *handle) | 
 | 4147 | { | 
 | 4148 | 	struct perf_event *event = handle->event; | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4149 | 	struct perf_buffer *buffer = handle->buffer; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4150 |  | 
 | 4151 | 	int wakeup_events = event->attr.wakeup_events; | 
 | 4152 |  | 
 | 4153 | 	if (handle->sample && wakeup_events) { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4154 | 		int events = local_inc_return(&buffer->events); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4155 | 		if (events >= wakeup_events) { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 4156 | 			local_sub(wakeup_events, &buffer->events); | 
 | 4157 | 			local_inc(&buffer->wakeup); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4158 | 		} | 
 | 4159 | 	} | 
 | 4160 |  | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 4161 | 	perf_output_put_handle(handle); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4162 | 	rcu_read_unlock(); | 
 | 4163 | } | 
 | 4164 |  | 
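 |  | /* | 
 |  |  * Emit the read_format payload for a single (non-group) event: the count, | 
 |  |  * optionally the enabled/running times, and the event id. | 
 |  |  */ | 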
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4165 | static void perf_output_read_one(struct perf_output_handle *handle, | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4166 | 				 struct perf_event *event, | 
 | 4167 | 				 u64 enabled, u64 running) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4168 | { | 
 | 4169 | 	u64 read_format = event->attr.read_format; | 
 | 4170 | 	u64 values[4]; | 
 | 4171 | 	int n = 0; | 
 | 4172 |  | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4173 | 	values[n++] = perf_event_count(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4174 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4175 | 		values[n++] = enabled + | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4176 | 			atomic64_read(&event->child_total_time_enabled); | 
 | 4177 | 	} | 
 | 4178 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4179 | 		values[n++] = running + | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4180 | 			atomic64_read(&event->child_total_time_running); | 
 | 4181 | 	} | 
 | 4182 | 	if (read_format & PERF_FORMAT_ID) | 
 | 4183 | 		values[n++] = primary_event_id(event); | 
 | 4184 |  | 
 | 4185 | 	perf_output_copy(handle, values, n * sizeof(u64)); | 
 | 4186 | } | 
 | 4187 |  | 
 | 4188 | /* | 
 | 4189 |  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. | 
 | 4190 |  */ | 
 | 4191 | static void perf_output_read_group(struct perf_output_handle *handle, | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4192 | 			    struct perf_event *event, | 
 | 4193 | 			    u64 enabled, u64 running) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4194 | { | 
 | 4195 | 	struct perf_event *leader = event->group_leader, *sub; | 
 | 4196 | 	u64 read_format = event->attr.read_format; | 
 | 4197 | 	u64 values[5]; | 
 | 4198 | 	int n = 0; | 
 | 4199 |  | 
 | 4200 | 	values[n++] = 1 + leader->nr_siblings; | 
 | 4201 |  | 
 | 4202 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4203 | 		values[n++] = enabled; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4204 |  | 
 | 4205 | 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4206 | 		values[n++] = running; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4207 |  | 
 | 4208 | 	if (leader != event) | 
 | 4209 | 		leader->pmu->read(leader); | 
 | 4210 |  | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4211 | 	values[n++] = perf_event_count(leader); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4212 | 	if (read_format & PERF_FORMAT_ID) | 
 | 4213 | 		values[n++] = primary_event_id(leader); | 
 | 4214 |  | 
 | 4215 | 	perf_output_copy(handle, values, n * sizeof(u64)); | 
 | 4216 |  | 
 | 4217 | 	list_for_each_entry(sub, &leader->sibling_list, group_entry) { | 
 | 4218 | 		n = 0; | 
 | 4219 |  | 
 | 4220 | 		if (sub != event) | 
 | 4221 | 			sub->pmu->read(sub); | 
 | 4222 |  | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4223 | 		values[n++] = perf_event_count(sub); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4224 | 		if (read_format & PERF_FORMAT_ID) | 
 | 4225 | 			values[n++] = primary_event_id(sub); | 
 | 4226 |  | 
 | 4227 | 		perf_output_copy(handle, values, n * sizeof(u64)); | 
 | 4228 | 	} | 
 | 4229 | } | 
 | 4230 |  | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4231 | #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ | 
 | 4232 | 				 PERF_FORMAT_TOTAL_TIME_RUNNING) | 
 | 4233 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4234 | static void perf_output_read(struct perf_output_handle *handle, | 
 | 4235 | 			     struct perf_event *event) | 
 | 4236 | { | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4237 | 	u64 enabled = 0, running = 0, now, ctx_time; | 
 | 4238 | 	u64 read_format = event->attr.read_format; | 
 | 4239 |  | 
 | 4240 | 	/* | 
 | 4241 | 	 * compute total_time_enabled, total_time_running | 
 | 4242 | 	 * based on snapshot values taken when the event | 
 | 4243 | 	 * was last scheduled in. | 
 | 4244 | 	 * | 
 | 4245 | 	 * we cannot simply call update_context_time() | 
 | 4246 | 	 * because of locking issues, as we are | 
 | 4247 | 	 * called in NMI context | 
 | 4248 | 	 */ | 
 | 4249 | 	if (read_format & PERF_FORMAT_TOTAL_TIMES) { | 
 | 4250 | 		now = perf_clock(); | 
 | 4251 | 		ctx_time = event->shadow_ctx_time + now; | 
 | 4252 | 		enabled = ctx_time - event->tstamp_enabled; | 
 | 4253 | 		running = ctx_time - event->tstamp_running; | 
 | 4254 | 	} | 
 | 4255 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4256 | 	if (event->attr.read_format & PERF_FORMAT_GROUP) | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4257 | 		perf_output_read_group(handle, event, enabled, running); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4258 | 	else | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 4259 | 		perf_output_read_one(handle, event, enabled, running); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4260 | } | 
 | 4261 |  | 
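 |  | /* | 
 |  |  * Write the PERF_RECORD_SAMPLE body, one field per set sample_type bit, | 
 |  |  * in the layout that perf_prepare_sample() sized the header for. | 
 |  |  */ | 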
 | 4262 | void perf_output_sample(struct perf_output_handle *handle, | 
 | 4263 | 			struct perf_event_header *header, | 
 | 4264 | 			struct perf_sample_data *data, | 
 | 4265 | 			struct perf_event *event) | 
 | 4266 | { | 
 | 4267 | 	u64 sample_type = data->type; | 
 | 4268 |  | 
 | 4269 | 	perf_output_put(handle, *header); | 
 | 4270 |  | 
 | 4271 | 	if (sample_type & PERF_SAMPLE_IP) | 
 | 4272 | 		perf_output_put(handle, data->ip); | 
 | 4273 |  | 
 | 4274 | 	if (sample_type & PERF_SAMPLE_TID) | 
 | 4275 | 		perf_output_put(handle, data->tid_entry); | 
 | 4276 |  | 
 | 4277 | 	if (sample_type & PERF_SAMPLE_TIME) | 
 | 4278 | 		perf_output_put(handle, data->time); | 
 | 4279 |  | 
 | 4280 | 	if (sample_type & PERF_SAMPLE_ADDR) | 
 | 4281 | 		perf_output_put(handle, data->addr); | 
 | 4282 |  | 
 | 4283 | 	if (sample_type & PERF_SAMPLE_ID) | 
 | 4284 | 		perf_output_put(handle, data->id); | 
 | 4285 |  | 
 | 4286 | 	if (sample_type & PERF_SAMPLE_STREAM_ID) | 
 | 4287 | 		perf_output_put(handle, data->stream_id); | 
 | 4288 |  | 
 | 4289 | 	if (sample_type & PERF_SAMPLE_CPU) | 
 | 4290 | 		perf_output_put(handle, data->cpu_entry); | 
 | 4291 |  | 
 | 4292 | 	if (sample_type & PERF_SAMPLE_PERIOD) | 
 | 4293 | 		perf_output_put(handle, data->period); | 
 | 4294 |  | 
 | 4295 | 	if (sample_type & PERF_SAMPLE_READ) | 
 | 4296 | 		perf_output_read(handle, event); | 
 | 4297 |  | 
 | 4298 | 	if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 
 | 4299 | 		if (data->callchain) { | 
 | 4300 | 			int size = 1; | 
 | 4301 |  | 
 | 4302 | 			size += data->callchain->nr; | 
 | 4304 |  | 
 | 4305 | 			size *= sizeof(u64); | 
 | 4306 |  | 
 | 4307 | 			perf_output_copy(handle, data->callchain, size); | 
 | 4308 | 		} else { | 
 | 4309 | 			u64 nr = 0; | 
 | 4310 | 			perf_output_put(handle, nr); | 
 | 4311 | 		} | 
 | 4312 | 	} | 
 | 4313 |  | 
 | 4314 | 	if (sample_type & PERF_SAMPLE_RAW) { | 
 | 4315 | 		if (data->raw) { | 
 | 4316 | 			perf_output_put(handle, data->raw->size); | 
 | 4317 | 			perf_output_copy(handle, data->raw->data, | 
 | 4318 | 					 data->raw->size); | 
 | 4319 | 		} else { | 
 | 4320 | 			struct { | 
 | 4321 | 				u32	size; | 
 | 4322 | 				u32	data; | 
 | 4323 | 			} raw = { | 
 | 4324 | 				.size = sizeof(u32), | 
 | 4325 | 				.data = 0, | 
 | 4326 | 			}; | 
 | 4327 | 			perf_output_put(handle, raw); | 
 | 4328 | 		} | 
 | 4329 | 	} | 
 | 4330 | } | 
 | 4331 |  | 
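 |  | /* | 
 |  |  * Fill in the sample header and gather the sample data (IP, callchain, | 
 |  |  * raw payload), growing header->size so it covers everything that | 
 |  |  * perf_output_sample() will write. | 
 |  |  */ | 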
 | 4332 | void perf_prepare_sample(struct perf_event_header *header, | 
 | 4333 | 			 struct perf_sample_data *data, | 
 | 4334 | 			 struct perf_event *event, | 
 | 4335 | 			 struct pt_regs *regs) | 
 | 4336 | { | 
 | 4337 | 	u64 sample_type = event->attr.sample_type; | 
 | 4338 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4339 | 	header->type = PERF_RECORD_SAMPLE; | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 4340 | 	header->size = sizeof(*header) + event->header_size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4341 |  | 
 | 4342 | 	header->misc = 0; | 
 | 4343 | 	header->misc |= perf_misc_flags(regs); | 
 | 4344 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4345 | 	__perf_event_header__init_id(header, data, event); | 
| Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 4346 |  | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 4347 | 	if (sample_type & PERF_SAMPLE_IP) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4348 | 		data->ip = perf_instruction_pointer(regs); | 
 | 4349 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4350 | 	if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 
 | 4351 | 		int size = 1; | 
 | 4352 |  | 
 | 4353 | 		data->callchain = perf_callchain(regs); | 
 | 4354 |  | 
 | 4355 | 		if (data->callchain) | 
 | 4356 | 			size += data->callchain->nr; | 
 | 4357 |  | 
 | 4358 | 		header->size += size * sizeof(u64); | 
 | 4359 | 	} | 
 | 4360 |  | 
 | 4361 | 	if (sample_type & PERF_SAMPLE_RAW) { | 
 | 4362 | 		int size = sizeof(u32); | 
 | 4363 |  | 
 | 4364 | 		if (data->raw) | 
 | 4365 | 			size += data->raw->size; | 
 | 4366 | 		else | 
 | 4367 | 			size += sizeof(u32); | 
 | 4368 |  | 
 | 4369 | 		WARN_ON_ONCE(size & (sizeof(u64)-1)); | 
 | 4370 | 		header->size += size; | 
 | 4371 | 	} | 
 | 4372 | } | 
 | 4373 |  | 
 | 4374 | static void perf_event_output(struct perf_event *event, int nmi, | 
 | 4375 | 				struct perf_sample_data *data, | 
 | 4376 | 				struct pt_regs *regs) | 
 | 4377 | { | 
 | 4378 | 	struct perf_output_handle handle; | 
 | 4379 | 	struct perf_event_header header; | 
 | 4380 |  | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 4381 | 	/* protect the callchain buffers */ | 
 | 4382 | 	rcu_read_lock(); | 
 | 4383 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4384 | 	perf_prepare_sample(&header, data, event, regs); | 
 | 4385 |  | 
 | 4386 | 	if (perf_output_begin(&handle, event, header.size, nmi, 1)) | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 4387 | 		goto exit; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4388 |  | 
 | 4389 | 	perf_output_sample(&handle, &header, data, event); | 
 | 4390 |  | 
 | 4391 | 	perf_output_end(&handle); | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 4392 |  | 
 | 4393 | exit: | 
 | 4394 | 	rcu_read_unlock(); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4395 | } | 
 | 4396 |  | 
 | 4397 | /* | 
 | 4398 |  * read event_id | 
 | 4399 |  */ | 
 | 4400 |  | 
 | 4401 | struct perf_read_event { | 
 | 4402 | 	struct perf_event_header	header; | 
 | 4403 |  | 
 | 4404 | 	u32				pid; | 
 | 4405 | 	u32				tid; | 
 | 4406 | }; | 
 | 4407 |  | 
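 |  | /* | 
 |  |  * Emit a PERF_RECORD_READ for @event on behalf of @task, carrying the | 
 |  |  * counter value(s) in the event's read_format. | 
 |  |  */ | 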
 | 4408 | static void | 
 | 4409 | perf_event_read_event(struct perf_event *event, | 
 | 4410 | 			struct task_struct *task) | 
 | 4411 | { | 
 | 4412 | 	struct perf_output_handle handle; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4413 | 	struct perf_sample_data sample; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4414 | 	struct perf_read_event read_event = { | 
 | 4415 | 		.header = { | 
 | 4416 | 			.type = PERF_RECORD_READ, | 
 | 4417 | 			.misc = 0, | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 4418 | 			.size = sizeof(read_event) + event->read_size, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4419 | 		}, | 
 | 4420 | 		.pid = perf_event_pid(event, task), | 
 | 4421 | 		.tid = perf_event_tid(event, task), | 
 | 4422 | 	}; | 
 | 4423 | 	int ret; | 
 | 4424 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4425 | 	perf_event_header__init_id(&read_event.header, &sample, event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4426 | 	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); | 
 | 4427 | 	if (ret) | 
 | 4428 | 		return; | 
 | 4429 |  | 
 | 4430 | 	perf_output_put(&handle, read_event); | 
 | 4431 | 	perf_output_read(&handle, event); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4432 | 	perf_event__output_id_sample(event, &handle, &sample); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4433 |  | 
 | 4434 | 	perf_output_end(&handle); | 
 | 4435 | } | 
 | 4436 |  | 
 | 4437 | /* | 
 | 4438 |  * task tracking -- fork/exit | 
 | 4439 |  * | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4440 |  * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4441 |  */ | 
 | 4442 |  | 
 | 4443 | struct perf_task_event { | 
 | 4444 | 	struct task_struct		*task; | 
 | 4445 | 	struct perf_event_context	*task_ctx; | 
 | 4446 |  | 
 | 4447 | 	struct { | 
 | 4448 | 		struct perf_event_header	header; | 
 | 4449 |  | 
 | 4450 | 		u32				pid; | 
 | 4451 | 		u32				ppid; | 
 | 4452 | 		u32				tid; | 
 | 4453 | 		u32				ptid; | 
 | 4454 | 		u64				time; | 
 | 4455 | 	} event_id; | 
 | 4456 | }; | 
 | 4457 |  | 
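 |  | /* | 
 |  |  * Write one PERF_RECORD_FORK/EXIT record to @event's buffer; pid/ppid and | 
 |  |  * tid/ptid are resolved per event via perf_event_pid()/perf_event_tid(). | 
 |  |  */ | 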
 | 4458 | static void perf_event_task_output(struct perf_event *event, | 
 | 4459 | 				     struct perf_task_event *task_event) | 
 | 4460 | { | 
 | 4461 | 	struct perf_output_handle handle; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4462 | 	struct perf_sample_data	sample; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4463 | 	struct task_struct *task = task_event->task; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4464 | 	int ret, size = task_event->event_id.header.size; | 
| Mike Galbraith | 8bb39f9 | 2010-03-26 11:11:33 +0100 | [diff] [blame] | 4465 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4466 | 	perf_event_header__init_id(&task_event->event_id.header, &sample, event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4467 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4468 | 	ret = perf_output_begin(&handle, event, | 
 | 4469 | 				task_event->event_id.header.size, 0, 0); | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 4470 | 	if (ret) | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4471 | 		goto out; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4472 |  | 
 | 4473 | 	task_event->event_id.pid = perf_event_pid(event, task); | 
 | 4474 | 	task_event->event_id.ppid = perf_event_pid(event, current); | 
 | 4475 |  | 
 | 4476 | 	task_event->event_id.tid = perf_event_tid(event, task); | 
 | 4477 | 	task_event->event_id.ptid = perf_event_tid(event, current); | 
 | 4478 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4479 | 	perf_output_put(&handle, task_event->event_id); | 
 | 4480 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4481 | 	perf_event__output_id_sample(event, &handle, &sample); | 
 | 4482 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4483 | 	perf_output_end(&handle); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4484 | out: | 
 | 4485 | 	task_event->event_id.header.size = size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4486 | } | 
 | 4487 |  | 
 | 4488 | static int perf_event_task_match(struct perf_event *event) | 
 | 4489 | { | 
| Peter Zijlstra | 6f93d0a | 2010-02-14 11:12:04 +0100 | [diff] [blame] | 4490 | 	if (event->state < PERF_EVENT_STATE_INACTIVE) | 
| Peter Zijlstra | 22e1908 | 2010-01-18 09:12:32 +0100 | [diff] [blame] | 4491 | 		return 0; | 
 | 4492 |  | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 4493 | 	if (!event_filter_match(event)) | 
| Peter Zijlstra | 5d27c23 | 2009-12-17 13:16:32 +0100 | [diff] [blame] | 4494 | 		return 0; | 
 | 4495 |  | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4496 | 	if (event->attr.comm || event->attr.mmap || | 
 | 4497 | 	    event->attr.mmap_data || event->attr.task) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4498 | 		return 1; | 
 | 4499 |  | 
 | 4500 | 	return 0; | 
 | 4501 | } | 
 | 4502 |  | 
 | 4503 | static void perf_event_task_ctx(struct perf_event_context *ctx, | 
 | 4504 | 				  struct perf_task_event *task_event) | 
 | 4505 | { | 
 | 4506 | 	struct perf_event *event; | 
 | 4507 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4508 | 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 
 | 4509 | 		if (perf_event_task_match(event)) | 
 | 4510 | 			perf_event_task_output(event, task_event); | 
 | 4511 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4512 | } | 
 | 4513 |  | 
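 |  | /* | 
 |  |  * Fan a task (fork/exit) event out to every interested event: walk all | 
 |  |  * registered PMUs, their per-CPU contexts and the matching task context. | 
 |  |  */ | 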
 | 4514 | static void perf_event_task_event(struct perf_task_event *task_event) | 
 | 4515 | { | 
 | 4516 | 	struct perf_cpu_context *cpuctx; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4517 | 	struct perf_event_context *ctx; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4518 | 	struct pmu *pmu; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4519 | 	int ctxn; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4520 |  | 
| Peter Zijlstra | d6ff86c | 2009-11-20 22:19:46 +0100 | [diff] [blame] | 4521 | 	rcu_read_lock(); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4522 | 	list_for_each_entry_rcu(pmu, &pmus, entry) { | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4523 | 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 4524 | 		if (cpuctx->active_pmu != pmu) | 
 | 4525 | 			goto next; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4526 | 		perf_event_task_ctx(&cpuctx->ctx, task_event); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4527 |  | 
 | 4528 | 		ctx = task_event->task_ctx; | 
 | 4529 | 		if (!ctx) { | 
 | 4530 | 			ctxn = pmu->task_ctx_nr; | 
 | 4531 | 			if (ctxn < 0) | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4532 | 				goto next; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4533 | 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 
 | 4534 | 		} | 
 | 4535 | 		if (ctx) | 
 | 4536 | 			perf_event_task_ctx(ctx, task_event); | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4537 | next: | 
 | 4538 | 		put_cpu_ptr(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4539 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4540 | 	rcu_read_unlock(); | 
 | 4541 | } | 
 | 4542 |  | 
 | 4543 | static void perf_event_task(struct task_struct *task, | 
 | 4544 | 			      struct perf_event_context *task_ctx, | 
 | 4545 | 			      int new) | 
 | 4546 | { | 
 | 4547 | 	struct perf_task_event task_event; | 
 | 4548 |  | 
 | 4549 | 	if (!atomic_read(&nr_comm_events) && | 
 | 4550 | 	    !atomic_read(&nr_mmap_events) && | 
 | 4551 | 	    !atomic_read(&nr_task_events)) | 
 | 4552 | 		return; | 
 | 4553 |  | 
 | 4554 | 	task_event = (struct perf_task_event){ | 
 | 4555 | 		.task	  = task, | 
 | 4556 | 		.task_ctx = task_ctx, | 
 | 4557 | 		.event_id    = { | 
 | 4558 | 			.header = { | 
 | 4559 | 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, | 
 | 4560 | 				.misc = 0, | 
 | 4561 | 				.size = sizeof(task_event.event_id), | 
 | 4562 | 			}, | 
 | 4563 | 			/* .pid  */ | 
 | 4564 | 			/* .ppid */ | 
 | 4565 | 			/* .tid  */ | 
 | 4566 | 			/* .ptid */ | 
| Peter Zijlstra | 6f93d0a | 2010-02-14 11:12:04 +0100 | [diff] [blame] | 4567 | 			.time = perf_clock(), | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4568 | 		}, | 
 | 4569 | 	}; | 
 | 4570 |  | 
 | 4571 | 	perf_event_task_event(&task_event); | 
 | 4572 | } | 
 | 4573 |  | 
 | 4574 | void perf_event_fork(struct task_struct *task) | 
 | 4575 | { | 
 | 4576 | 	perf_event_task(task, NULL, 1); | 
 | 4577 | } | 
 | 4578 |  | 
 | 4579 | /* | 
 | 4580 |  * comm tracking | 
 | 4581 |  */ | 
 | 4582 |  | 
 | 4583 | struct perf_comm_event { | 
 | 4584 | 	struct task_struct	*task; | 
 | 4585 | 	char			*comm; | 
 | 4586 | 	int			comm_size; | 
 | 4587 |  | 
 | 4588 | 	struct { | 
 | 4589 | 		struct perf_event_header	header; | 
 | 4590 |  | 
 | 4591 | 		u32				pid; | 
 | 4592 | 		u32				tid; | 
 | 4593 | 	} event_id; | 
 | 4594 | }; | 
 | 4595 |  | 
 | 4596 | static void perf_event_comm_output(struct perf_event *event, | 
 | 4597 | 				     struct perf_comm_event *comm_event) | 
 | 4598 | { | 
 | 4599 | 	struct perf_output_handle handle; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4600 | 	struct perf_sample_data sample; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4601 | 	int size = comm_event->event_id.header.size; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4602 | 	int ret; | 
 | 4603 |  | 
 | 4604 | 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event); | 
 | 4605 | 	ret = perf_output_begin(&handle, event, | 
 | 4606 | 				comm_event->event_id.header.size, 0, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4607 |  | 
 | 4608 | 	if (ret) | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4609 | 		goto out; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4610 |  | 
 | 4611 | 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task); | 
 | 4612 | 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task); | 
 | 4613 |  | 
 | 4614 | 	perf_output_put(&handle, comm_event->event_id); | 
 | 4615 | 	perf_output_copy(&handle, comm_event->comm, | 
 | 4616 | 				   comm_event->comm_size); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4617 |  | 
 | 4618 | 	perf_event__output_id_sample(event, &handle, &sample); | 
 | 4619 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4620 | 	perf_output_end(&handle); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4621 | out: | 
 | 4622 | 	comm_event->event_id.header.size = size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4623 | } | 
 | 4624 |  | 
 | 4625 | static int perf_event_comm_match(struct perf_event *event) | 
 | 4626 | { | 
| Peter Zijlstra | 6f93d0a | 2010-02-14 11:12:04 +0100 | [diff] [blame] | 4627 | 	if (event->state < PERF_EVENT_STATE_INACTIVE) | 
| Peter Zijlstra | 22e1908 | 2010-01-18 09:12:32 +0100 | [diff] [blame] | 4628 | 		return 0; | 
 | 4629 |  | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 4630 | 	if (!event_filter_match(event)) | 
| Peter Zijlstra | 5d27c23 | 2009-12-17 13:16:32 +0100 | [diff] [blame] | 4631 | 		return 0; | 
 | 4632 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4633 | 	if (event->attr.comm) | 
 | 4634 | 		return 1; | 
 | 4635 |  | 
 | 4636 | 	return 0; | 
 | 4637 | } | 
 | 4638 |  | 
 | 4639 | static void perf_event_comm_ctx(struct perf_event_context *ctx, | 
 | 4640 | 				  struct perf_comm_event *comm_event) | 
 | 4641 | { | 
 | 4642 | 	struct perf_event *event; | 
 | 4643 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4644 | 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 
 | 4645 | 		if (perf_event_comm_match(event)) | 
 | 4646 | 			perf_event_comm_output(event, comm_event); | 
 | 4647 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4648 | } | 
 | 4649 |  | 
 | 4650 | static void perf_event_comm_event(struct perf_comm_event *comm_event) | 
 | 4651 | { | 
 | 4652 | 	struct perf_cpu_context *cpuctx; | 
 | 4653 | 	struct perf_event_context *ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4654 | 	char comm[TASK_COMM_LEN]; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4655 | 	unsigned int size; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4656 | 	struct pmu *pmu; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4657 | 	int ctxn; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4658 |  | 
 | 4659 | 	memset(comm, 0, sizeof(comm)); | 
| Márton Németh | 96b02d7 | 2009-11-21 23:10:15 +0100 | [diff] [blame] | 4660 | 	strlcpy(comm, comm_event->task->comm, sizeof(comm)); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4661 | 	size = ALIGN(strlen(comm)+1, sizeof(u64)); | 
 | 4662 |  | 
 | 4663 | 	comm_event->comm = comm; | 
 | 4664 | 	comm_event->comm_size = size; | 
 | 4665 |  | 
 | 4666 | 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 
| Peter Zijlstra | f6595f3 | 2009-11-20 22:19:47 +0100 | [diff] [blame] | 4667 | 	rcu_read_lock(); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4668 | 	list_for_each_entry_rcu(pmu, &pmus, entry) { | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4669 | 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 4670 | 		if (cpuctx->active_pmu != pmu) | 
 | 4671 | 			goto next; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4672 | 		perf_event_comm_ctx(&cpuctx->ctx, comm_event); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4673 |  | 
 | 4674 | 		ctxn = pmu->task_ctx_nr; | 
 | 4675 | 		if (ctxn < 0) | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4676 | 			goto next; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4677 |  | 
 | 4678 | 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 
 | 4679 | 		if (ctx) | 
 | 4680 | 			perf_event_comm_ctx(ctx, comm_event); | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4681 | next: | 
 | 4682 | 		put_cpu_ptr(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4683 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4684 | 	rcu_read_unlock(); | 
 | 4685 | } | 
 | 4686 |  | 
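 |  | /* | 
 |  |  * Called when a task's comm changes (typically on exec): enable any | 
 |  |  * enable_on_exec events in the task's contexts and, if anyone is | 
 |  |  * listening, emit a PERF_RECORD_COMM. | 
 |  |  */ | 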
 | 4687 | void perf_event_comm(struct task_struct *task) | 
 | 4688 | { | 
 | 4689 | 	struct perf_comm_event comm_event; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4690 | 	struct perf_event_context *ctx; | 
 | 4691 | 	int ctxn; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4692 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4693 | 	for_each_task_context_nr(ctxn) { | 
 | 4694 | 		ctx = task->perf_event_ctxp[ctxn]; | 
 | 4695 | 		if (!ctx) | 
 | 4696 | 			continue; | 
 | 4697 |  | 
 | 4698 | 		perf_event_enable_on_exec(ctx); | 
 | 4699 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4700 |  | 
 | 4701 | 	if (!atomic_read(&nr_comm_events)) | 
 | 4702 | 		return; | 
 | 4703 |  | 
 | 4704 | 	comm_event = (struct perf_comm_event){ | 
 | 4705 | 		.task	= task, | 
 | 4706 | 		/* .comm      */ | 
 | 4707 | 		/* .comm_size */ | 
 | 4708 | 		.event_id  = { | 
 | 4709 | 			.header = { | 
 | 4710 | 				.type = PERF_RECORD_COMM, | 
 | 4711 | 				.misc = 0, | 
 | 4712 | 				/* .size */ | 
 | 4713 | 			}, | 
 | 4714 | 			/* .pid */ | 
 | 4715 | 			/* .tid */ | 
 | 4716 | 		}, | 
 | 4717 | 	}; | 
 | 4718 |  | 
 | 4719 | 	perf_event_comm_event(&comm_event); | 
 | 4720 | } | 
 | 4721 |  | 
 | 4722 | /* | 
 | 4723 |  * mmap tracking | 
 | 4724 |  */ | 
 | 4725 |  | 
 | 4726 | struct perf_mmap_event { | 
 | 4727 | 	struct vm_area_struct	*vma; | 
 | 4728 |  | 
 | 4729 | 	const char		*file_name; | 
 | 4730 | 	int			file_size; | 
 | 4731 |  | 
 | 4732 | 	struct { | 
 | 4733 | 		struct perf_event_header	header; | 
 | 4734 |  | 
 | 4735 | 		u32				pid; | 
 | 4736 | 		u32				tid; | 
 | 4737 | 		u64				start; | 
 | 4738 | 		u64				len; | 
 | 4739 | 		u64				pgoff; | 
 | 4740 | 	} event_id; | 
 | 4741 | }; | 
 | 4742 |  | 
 | 4743 | static void perf_event_mmap_output(struct perf_event *event, | 
 | 4744 | 				     struct perf_mmap_event *mmap_event) | 
 | 4745 | { | 
 | 4746 | 	struct perf_output_handle handle; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4747 | 	struct perf_sample_data sample; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4748 | 	int size = mmap_event->event_id.header.size; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4749 | 	int ret; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4750 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4751 | 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); | 
 | 4752 | 	ret = perf_output_begin(&handle, event, | 
 | 4753 | 				mmap_event->event_id.header.size, 0, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4754 | 	if (ret) | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4755 | 		goto out; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4756 |  | 
 | 4757 | 	mmap_event->event_id.pid = perf_event_pid(event, current); | 
 | 4758 | 	mmap_event->event_id.tid = perf_event_tid(event, current); | 
 | 4759 |  | 
 | 4760 | 	perf_output_put(&handle, mmap_event->event_id); | 
 | 4761 | 	perf_output_copy(&handle, mmap_event->file_name, | 
 | 4762 | 				   mmap_event->file_size); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4763 |  | 
 | 4764 | 	perf_event__output_id_sample(event, &handle, &sample); | 
 | 4765 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4766 | 	perf_output_end(&handle); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4767 | out: | 
 | 4768 | 	mmap_event->event_id.header.size = size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4769 | } | 
 | 4770 |  | 
 | 4771 | static int perf_event_mmap_match(struct perf_event *event, | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4772 | 				   struct perf_mmap_event *mmap_event, | 
 | 4773 | 				   int executable) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4774 | { | 
| Peter Zijlstra | 6f93d0a | 2010-02-14 11:12:04 +0100 | [diff] [blame] | 4775 | 	if (event->state < PERF_EVENT_STATE_INACTIVE) | 
| Peter Zijlstra | 22e1908 | 2010-01-18 09:12:32 +0100 | [diff] [blame] | 4776 | 		return 0; | 
 | 4777 |  | 
| Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 4778 | 	if (!event_filter_match(event)) | 
| Peter Zijlstra | 5d27c23 | 2009-12-17 13:16:32 +0100 | [diff] [blame] | 4779 | 		return 0; | 
 | 4780 |  | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4781 | 	if ((!executable && event->attr.mmap_data) || | 
 | 4782 | 	    (executable && event->attr.mmap)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4783 | 		return 1; | 
 | 4784 |  | 
 | 4785 | 	return 0; | 
 | 4786 | } | 
 | 4787 |  | 
 | 4788 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4789 | 				  struct perf_mmap_event *mmap_event, | 
 | 4790 | 				  int executable) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4791 | { | 
 | 4792 | 	struct perf_event *event; | 
 | 4793 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4794 | 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4795 | 		if (perf_event_mmap_match(event, mmap_event, executable)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4796 | 			perf_event_mmap_output(event, mmap_event); | 
 | 4797 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4798 | } | 
 | 4799 |  | 
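 |  | /* | 
 |  |  * Resolve a name for the mapping (file path via d_path(), an arch-specific | 
 |  |  * name, or a synthetic tag like "[vdso]", "[heap]", "[stack]" or "//anon") | 
 |  |  * and fan the PERF_RECORD_MMAP out to all matching events. | 
 |  |  */ | 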
 | 4800 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | 
 | 4801 | { | 
 | 4802 | 	struct perf_cpu_context *cpuctx; | 
 | 4803 | 	struct perf_event_context *ctx; | 
 | 4804 | 	struct vm_area_struct *vma = mmap_event->vma; | 
 | 4805 | 	struct file *file = vma->vm_file; | 
 | 4806 | 	unsigned int size; | 
 | 4807 | 	char tmp[16]; | 
 | 4808 | 	char *buf = NULL; | 
 | 4809 | 	const char *name; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4810 | 	struct pmu *pmu; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4811 | 	int ctxn; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4812 |  | 
 | 4813 | 	memset(tmp, 0, sizeof(tmp)); | 
 | 4814 |  | 
 | 4815 | 	if (file) { | 
 | 4816 | 		/* | 
 | 4817 | 		 * d_path works from the end of the buffer backwards, so we | 
 | 4818 | 		 * need to add enough zero bytes after the string to handle | 
 | 4819 | 		 * the 64bit alignment we do later. | 
 | 4820 | 		 */ | 
 | 4821 | 		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); | 
 | 4822 | 		if (!buf) { | 
 | 4823 | 			name = strncpy(tmp, "//enomem", sizeof(tmp)); | 
 | 4824 | 			goto got_name; | 
 | 4825 | 		} | 
 | 4826 | 		name = d_path(&file->f_path, buf, PATH_MAX); | 
 | 4827 | 		if (IS_ERR(name)) { | 
 | 4828 | 			name = strncpy(tmp, "//toolong", sizeof(tmp)); | 
 | 4829 | 			goto got_name; | 
 | 4830 | 		} | 
 | 4831 | 	} else { | 
 | 4832 | 		if (arch_vma_name(mmap_event->vma)) { | 
 | 4833 | 			name = strncpy(tmp, arch_vma_name(mmap_event->vma), | 
 | 4834 | 				       sizeof(tmp)); | 
 | 4835 | 			goto got_name; | 
 | 4836 | 		} | 
 | 4837 |  | 
 | 4838 | 		if (!vma->vm_mm) { | 
 | 4839 | 			name = strncpy(tmp, "[vdso]", sizeof(tmp)); | 
 | 4840 | 			goto got_name; | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4841 | 		} else if (vma->vm_start <= vma->vm_mm->start_brk && | 
 | 4842 | 				vma->vm_end >= vma->vm_mm->brk) { | 
 | 4843 | 			name = strncpy(tmp, "[heap]", sizeof(tmp)); | 
 | 4844 | 			goto got_name; | 
 | 4845 | 		} else if (vma->vm_start <= vma->vm_mm->start_stack && | 
 | 4846 | 				vma->vm_end >= vma->vm_mm->start_stack) { | 
 | 4847 | 			name = strncpy(tmp, "[stack]", sizeof(tmp)); | 
 | 4848 | 			goto got_name; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4849 | 		} | 
 | 4850 |  | 
 | 4851 | 		name = strncpy(tmp, "//anon", sizeof(tmp)); | 
 | 4852 | 		goto got_name; | 
 | 4853 | 	} | 
 | 4854 |  | 
 | 4855 | got_name: | 
 | 4856 | 	size = ALIGN(strlen(name)+1, sizeof(u64)); | 
 | 4857 |  | 
 | 4858 | 	mmap_event->file_name = name; | 
 | 4859 | 	mmap_event->file_size = size; | 
 | 4860 |  | 
 | 4861 | 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | 
 | 4862 |  | 
| Peter Zijlstra | f6d9dd2 | 2009-11-20 22:19:48 +0100 | [diff] [blame] | 4863 | 	rcu_read_lock(); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4864 | 	list_for_each_entry_rcu(pmu, &pmus, entry) { | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4865 | 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 4866 | 		if (cpuctx->active_pmu != pmu) | 
 | 4867 | 			goto next; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4868 | 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, | 
 | 4869 | 					vma->vm_flags & VM_EXEC); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4870 |  | 
 | 4871 | 		ctxn = pmu->task_ctx_nr; | 
 | 4872 | 		if (ctxn < 0) | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4873 | 			goto next; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4874 |  | 
 | 4875 | 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 
 | 4876 | 		if (ctx) { | 
 | 4877 | 			perf_event_mmap_ctx(ctx, mmap_event, | 
 | 4878 | 					vma->vm_flags & VM_EXEC); | 
 | 4879 | 		} | 
| Peter Zijlstra | 41945f6 | 2010-09-16 19:17:24 +0200 | [diff] [blame] | 4880 | next: | 
 | 4881 | 		put_cpu_ptr(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4882 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4883 | 	rcu_read_unlock(); | 
 | 4884 |  | 
 | 4885 | 	kfree(buf); | 
 | 4886 | } | 
 | 4887 |  | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 4888 | void perf_event_mmap(struct vm_area_struct *vma) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4889 | { | 
 | 4890 | 	struct perf_mmap_event mmap_event; | 
 | 4891 |  | 
 | 4892 | 	if (!atomic_read(&nr_mmap_events)) | 
 | 4893 | 		return; | 
 | 4894 |  | 
 | 4895 | 	mmap_event = (struct perf_mmap_event){ | 
 | 4896 | 		.vma	= vma, | 
 | 4897 | 		/* .file_name */ | 
 | 4898 | 		/* .file_size */ | 
 | 4899 | 		.event_id  = { | 
 | 4900 | 			.header = { | 
 | 4901 | 				.type = PERF_RECORD_MMAP, | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 4902 | 				.misc = PERF_RECORD_MISC_USER, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4903 | 				/* .size */ | 
 | 4904 | 			}, | 
 | 4905 | 			/* .pid */ | 
 | 4906 | 			/* .tid */ | 
 | 4907 | 			.start  = vma->vm_start, | 
 | 4908 | 			.len    = vma->vm_end - vma->vm_start, | 
| Peter Zijlstra | 3a0304e | 2010-02-26 10:33:41 +0100 | [diff] [blame] | 4909 | 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4910 | 		}, | 
 | 4911 | 	}; | 
 | 4912 |  | 
 | 4913 | 	perf_event_mmap_event(&mmap_event); | 
 | 4914 | } | 
 | 4915 |  | 
 | 4916 | /* | 
 | 4917 |  * IRQ throttle logging | 
 | 4918 |  */ | 
 | 4919 |  | 
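 |  | /* | 
 |  |  * Emit a PERF_RECORD_THROTTLE or PERF_RECORD_UNTHROTTLE record noting | 
 |  |  * when interrupt throttling kicked in or was lifted for @event. | 
 |  |  */ | 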
 | 4920 | static void perf_log_throttle(struct perf_event *event, int enable) | 
 | 4921 | { | 
 | 4922 | 	struct perf_output_handle handle; | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4923 | 	struct perf_sample_data sample; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4924 | 	int ret; | 
 | 4925 |  | 
 | 4926 | 	struct { | 
 | 4927 | 		struct perf_event_header	header; | 
 | 4928 | 		u64				time; | 
 | 4929 | 		u64				id; | 
 | 4930 | 		u64				stream_id; | 
 | 4931 | 	} throttle_event = { | 
 | 4932 | 		.header = { | 
 | 4933 | 			.type = PERF_RECORD_THROTTLE, | 
 | 4934 | 			.misc = 0, | 
 | 4935 | 			.size = sizeof(throttle_event), | 
 | 4936 | 		}, | 
 | 4937 | 		.time		= perf_clock(), | 
 | 4938 | 		.id		= primary_event_id(event), | 
 | 4939 | 		.stream_id	= event->id, | 
 | 4940 | 	}; | 
 | 4941 |  | 
 | 4942 | 	if (enable) | 
 | 4943 | 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE; | 
 | 4944 |  | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4945 | 	perf_event_header__init_id(&throttle_event.header, &sample, event); | 
 | 4946 |  | 
 | 4947 | 	ret = perf_output_begin(&handle, event, | 
 | 4948 | 				throttle_event.header.size, 1, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4949 | 	if (ret) | 
 | 4950 | 		return; | 
 | 4951 |  | 
 | 4952 | 	perf_output_put(&handle, throttle_event); | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 4953 | 	perf_event__output_id_sample(event, &handle, &sample); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4954 | 	perf_output_end(&handle); | 
 | 4955 | } | 
 | 4956 |  | 
 | 4957 | /* | 
 | 4958 |  * Generic event overflow handling, sampling. | 
 | 4959 |  */ | 
 | 4960 |  | 
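 |  | /* | 
 |  |  * Handle one overflow of a sampling event: throttle when the interrupt | 
 |  |  * rate exceeds max_samples_per_tick, adjust the period for freq events, | 
 |  |  * honour event_limit, and finally emit the sample (or invoke the event's | 
 |  |  * overflow_handler).  A non-zero return tells the caller to stop further | 
 |  |  * overflow processing for this event. | 
 |  |  */ | 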
 | 4961 | static int __perf_event_overflow(struct perf_event *event, int nmi, | 
 | 4962 | 				   int throttle, struct perf_sample_data *data, | 
 | 4963 | 				   struct pt_regs *regs) | 
 | 4964 | { | 
 | 4965 | 	int events = atomic_read(&event->event_limit); | 
 | 4966 | 	struct hw_perf_event *hwc = &event->hw; | 
 | 4967 | 	int ret = 0; | 
 | 4968 |  | 
| Peter Zijlstra | 9639882 | 2010-11-24 18:55:29 +0100 | [diff] [blame] | 4969 | 	/* | 
 | 4970 | 	 * Non-sampling counters might still use the PMI to fold short | 
 | 4971 | 	 * hardware counters; ignore those here. | 
 | 4972 | 	 */ | 
 | 4973 | 	if (unlikely(!is_sampling_event(event))) | 
 | 4974 | 		return 0; | 
 | 4975 |  | 
| Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 4976 | 	if (unlikely(hwc->interrupts >= max_samples_per_tick)) { | 
 | 4977 | 		if (throttle) { | 
 | 4978 | 			hwc->interrupts = MAX_INTERRUPTS; | 
 | 4979 | 			perf_log_throttle(event, 0); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4980 | 			ret = 1; | 
 | 4981 | 		} | 
| Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 4982 | 	} else | 
 | 4983 | 		hwc->interrupts++; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4984 |  | 
 | 4985 | 	if (event->attr.freq) { | 
 | 4986 | 		u64 now = perf_clock(); | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4987 | 		s64 delta = now - hwc->freq_time_stamp; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4988 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4989 | 		hwc->freq_time_stamp = now; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4990 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4991 | 		if (delta > 0 && delta < 2*TICK_NSEC) | 
 | 4992 | 			perf_adjust_period(event, delta, hwc->last_period); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4993 | 	} | 
 | 4994 |  | 
 | 4995 | 	/* | 
 | 4996 | 	 * XXX event_limit might not quite work as expected on inherited | 
 | 4997 | 	 * events | 
 | 4998 | 	 */ | 
 | 4999 |  | 
 | 5000 | 	event->pending_kill = POLL_IN; | 
 | 5001 | 	if (events && atomic_dec_and_test(&event->event_limit)) { | 
 | 5002 | 		ret = 1; | 
 | 5003 | 		event->pending_kill = POLL_HUP; | 
 | 5004 | 		if (nmi) { | 
 | 5005 | 			event->pending_disable = 1; | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 5006 | 			irq_work_queue(&event->pending); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5007 | 		} else | 
 | 5008 | 			perf_event_disable(event); | 
 | 5009 | 	} | 
 | 5010 |  | 
| Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 5011 | 	if (event->overflow_handler) | 
 | 5012 | 		event->overflow_handler(event, nmi, data, regs); | 
 | 5013 | 	else | 
 | 5014 | 		perf_event_output(event, nmi, data, regs); | 
 | 5015 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5016 | 	return ret; | 
 | 5017 | } | 
 | 5018 |  | 
 | 5019 | int perf_event_overflow(struct perf_event *event, int nmi, | 
 | 5020 | 			  struct perf_sample_data *data, | 
 | 5021 | 			  struct pt_regs *regs) | 
 | 5022 | { | 
 | 5023 | 	return __perf_event_overflow(event, nmi, 1, data, regs); | 
 | 5024 | } | 
 | 5025 |  | 
 | 5026 | /* | 
 | 5027 |  * Generic software event infrastructure | 
 | 5028 |  */ | 
 | 5029 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5030 | struct swevent_htable { | 
 | 5031 | 	struct swevent_hlist		*swevent_hlist; | 
 | 5032 | 	struct mutex			hlist_mutex; | 
 | 5033 | 	int				hlist_refcount; | 
 | 5034 |  | 
 | 5035 | 	/* Recursion avoidance in each context */ | 
 | 5036 | 	int				recursion[PERF_NR_CONTEXTS]; | 
 | 5037 | }; | 
 | 5038 |  | 
 | 5039 | static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); | 
 | 5040 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5041 | /* | 
 | 5042 |  * We directly increment event->count and keep a second value in | 
 | 5043 |  * event->hw.period_left to count intervals. This period value | 
 | 5044 |  * is kept in the range [-sample_period, 0] so that we can use the | 
 | 5045 |  * sign as trigger. | 
 | 5046 |  */ | 
 | 5047 |  | 
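 |  | /* | 
 |  |  * A minimal sketch of the arithmetic, assuming sample_period == 100: once | 
 |  |  * period_left has climbed to 20, this returns (100 + 20) / 100 = 1 expired | 
 |  |  * period and resets period_left to 20 - 100 = -80; a larger backlog of 250 | 
 |  |  * would return 3 periods and leave period_left at -50. | 
 |  |  */ | 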
 | 5048 | static u64 perf_swevent_set_period(struct perf_event *event) | 
 | 5049 | { | 
 | 5050 | 	struct hw_perf_event *hwc = &event->hw; | 
 | 5051 | 	u64 period = hwc->last_period; | 
 | 5052 | 	u64 nr, offset; | 
 | 5053 | 	s64 old, val; | 
 | 5054 |  | 
 | 5055 | 	hwc->last_period = hwc->sample_period; | 
 | 5056 |  | 
 | 5057 | again: | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5058 | 	old = val = local64_read(&hwc->period_left); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5059 | 	if (val < 0) | 
 | 5060 | 		return 0; | 
 | 5061 |  | 
 | 5062 | 	nr = div64_u64(period + val, period); | 
 | 5063 | 	offset = nr * period; | 
 | 5064 | 	val -= offset; | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5065 | 	if (local64_cmpxchg(&hwc->period_left, old, val) != old) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5066 | 		goto again; | 
 | 5067 |  | 
 | 5068 | 	return nr; | 
 | 5069 | } | 
 | 5070 |  | 
| Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 5071 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5072 | 				    int nmi, struct perf_sample_data *data, | 
 | 5073 | 				    struct pt_regs *regs) | 
 | 5074 | { | 
 | 5075 | 	struct hw_perf_event *hwc = &event->hw; | 
 | 5076 | 	int throttle = 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5077 |  | 
 | 5078 | 	data->period = event->hw.last_period; | 
| Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 5079 | 	if (!overflow) | 
 | 5080 | 		overflow = perf_swevent_set_period(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5081 |  | 
 | 5082 | 	if (hwc->interrupts == MAX_INTERRUPTS) | 
 | 5083 | 		return; | 
 | 5084 |  | 
 | 5085 | 	for (; overflow; overflow--) { | 
 | 5086 | 		if (__perf_event_overflow(event, nmi, throttle, | 
 | 5087 | 					    data, regs)) { | 
 | 5088 | 			/* | 
 | 5089 | 			 * We inhibit the overflow from happening when | 
 | 5090 | 			 * hwc->interrupts == MAX_INTERRUPTS. | 
 | 5091 | 			 */ | 
 | 5092 | 			break; | 
 | 5093 | 		} | 
 | 5094 | 		throttle = 1; | 
 | 5095 | 	} | 
 | 5096 | } | 
 | 5097 |  | 
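 |  | /* | 
 |  |  * Count @nr occurrences of a software event; for sampling events, add to | 
 |  |  * period_left and take the overflow path once the period has expired. | 
 |  |  */ | 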
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5098 | static void perf_swevent_event(struct perf_event *event, u64 nr, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5099 | 			       int nmi, struct perf_sample_data *data, | 
 | 5100 | 			       struct pt_regs *regs) | 
 | 5101 | { | 
 | 5102 | 	struct hw_perf_event *hwc = &event->hw; | 
 | 5103 |  | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5104 | 	local64_add(nr, &event->count); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5105 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5106 | 	if (!regs) | 
 | 5107 | 		return; | 
 | 5108 |  | 
| Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 5109 | 	if (!is_sampling_event(event)) | 
| Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 5110 | 		return; | 
 | 5111 |  | 
 | 5112 | 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) | 
 | 5113 | 		return perf_swevent_overflow(event, 1, nmi, data, regs); | 
 | 5114 |  | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5115 | 	if (local64_add_negative(nr, &hwc->period_left)) | 
| Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 5116 | 		return; | 
 | 5117 |  | 
 | 5118 | 	perf_swevent_overflow(event, 0, nmi, data, regs); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5119 | } | 
 | 5120 |  | 
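 |  | /* | 
 |  |  * Filter out samples for events that are stopped or that exclude the | 
 |  |  * privilege level (user/kernel) the sample was taken in. | 
 |  |  */ | 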
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5121 | static int perf_exclude_event(struct perf_event *event, | 
 | 5122 | 			      struct pt_regs *regs) | 
 | 5123 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5124 | 	if (event->hw.state & PERF_HES_STOPPED) | 
| Frederic Weisbecker | 91b2f48 | 2011-03-07 21:27:08 +0100 | [diff] [blame] | 5125 | 		return 1; | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5126 |  | 
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5127 | 	if (regs) { | 
 | 5128 | 		if (event->attr.exclude_user && user_mode(regs)) | 
 | 5129 | 			return 1; | 
 | 5130 |  | 
 | 5131 | 		if (event->attr.exclude_kernel && !user_mode(regs)) | 
 | 5132 | 			return 1; | 
 | 5133 | 	} | 
 | 5134 |  | 
 | 5135 | 	return 0; | 
 | 5136 | } | 
 | 5137 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5138 | static int perf_swevent_match(struct perf_event *event, | 
 | 5139 | 				enum perf_type_id type, | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5140 | 				u32 event_id, | 
 | 5141 | 				struct perf_sample_data *data, | 
 | 5142 | 				struct pt_regs *regs) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5143 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5144 | 	if (event->attr.type != type) | 
 | 5145 | 		return 0; | 
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5146 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5147 | 	if (event->attr.config != event_id) | 
 | 5148 | 		return 0; | 
 | 5149 |  | 
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5150 | 	if (perf_exclude_event(event, regs)) | 
 | 5151 | 		return 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5152 |  | 
 | 5153 | 	return 1; | 
 | 5154 | } | 
 | 5155 |  | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5156 | static inline u64 swevent_hash(u64 type, u32 event_id) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5157 | { | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5158 | 	u64 val = event_id | (type << 32); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5159 |  | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5160 | 	return hash_64(val, SWEVENT_HLIST_BITS); | 
 | 5161 | } | 
 | 5162 |  | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5163 | static inline struct hlist_head * | 
 | 5164 | __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5165 | { | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5166 | 	u64 hash = swevent_hash(type, event_id); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5167 |  | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5168 | 	return &hlist->heads[hash]; | 
 | 5169 | } | 
 | 5170 |  | 
 | 5171 | /* For the read side: events when they trigger */ | 
 | 5172 | static inline struct hlist_head * | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5173 | find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5174 | { | 
 | 5175 | 	struct swevent_hlist *hlist; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5176 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5177 | 	hlist = rcu_dereference(swhash->swevent_hlist); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5178 | 	if (!hlist) | 
 | 5179 | 		return NULL; | 
 | 5180 |  | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5181 | 	return __find_swevent_head(hlist, type, event_id); | 
 | 5182 | } | 
 | 5183 |  | 
 | 5184 | /* For the event head insertion and removal in the hlist */ | 
 | 5185 | static inline struct hlist_head * | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5186 | find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5187 | { | 
 | 5188 | 	struct swevent_hlist *hlist; | 
 | 5189 | 	u32 event_id = event->attr.config; | 
 | 5190 | 	u64 type = event->attr.type; | 
 | 5191 |  | 
 | 5192 | 	/* | 
 | 5193 | 	 * Event scheduling is always serialized against hlist allocation | 
 | 5194 | 	 * and release; the context lock guarantees that, which makes the | 
 | 5195 | 	 * lockdep-protected dereference suitable here. | 
 | 5196 | 	 */ | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5197 | 	hlist = rcu_dereference_protected(swhash->swevent_hlist, | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5198 | 					  lockdep_is_held(&event->ctx->lock)); | 
 | 5199 | 	if (!hlist) | 
 | 5200 | 		return NULL; | 
 | 5201 |  | 
 | 5202 | 	return __find_swevent_head(hlist, type, event_id); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5203 | } | 
 | 5204 |  | 
 | 5205 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | 
 | 5206 | 				    u64 nr, int nmi, | 
 | 5207 | 				    struct perf_sample_data *data, | 
 | 5208 | 				    struct pt_regs *regs) | 
 | 5209 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5210 | 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5211 | 	struct perf_event *event; | 
 | 5212 | 	struct hlist_node *node; | 
 | 5213 | 	struct hlist_head *head; | 
 | 5214 |  | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5215 | 	rcu_read_lock(); | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5216 | 	head = find_swevent_head_rcu(swhash, type, event_id); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5217 | 	if (!head) | 
 | 5218 | 		goto end; | 
 | 5219 |  | 
 | 5220 | 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5221 | 		if (perf_swevent_match(event, type, event_id, data, regs)) | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5222 | 			perf_swevent_event(event, nr, nmi, data, regs); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5223 | 	} | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5224 | end: | 
 | 5225 | 	rcu_read_unlock(); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5226 | } | 
 | 5227 |  | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 5228 | int perf_swevent_get_recursion_context(void) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5229 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5230 | 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 
| Frederic Weisbecker | ce71b9d | 2009-11-22 05:26:55 +0100 | [diff] [blame] | 5231 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5232 | 	return get_recursion_context(swhash->recursion); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5233 | } | 
| Ingo Molnar | 645e8cc | 2009-11-22 12:20:19 +0100 | [diff] [blame] | 5234 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5235 |  | 
| Jesper Juhl | fa9f90b | 2010-11-28 21:39:34 +0100 | [diff] [blame] | 5236 | inline void perf_swevent_put_recursion_context(int rctx) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5237 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5238 | 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 5239 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5240 | 	put_recursion_context(swhash->recursion, rctx); | 
| Frederic Weisbecker | ce71b9d | 2009-11-22 05:26:55 +0100 | [diff] [blame] | 5241 | } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5242 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5243 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 
 | 5244 | 			    struct pt_regs *regs, u64 addr) | 
 | 5245 | { | 
| Ingo Molnar | a4234bf | 2009-11-23 10:57:59 +0100 | [diff] [blame] | 5246 | 	struct perf_sample_data data; | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 5247 | 	int rctx; | 
 | 5248 |  | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5249 | 	preempt_disable_notrace(); | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 5250 | 	rctx = perf_swevent_get_recursion_context(); | 
 | 5251 | 	if (rctx < 0) | 
 | 5252 | 		return; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5253 |  | 
| Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 5254 | 	perf_sample_data_init(&data, addr); | 
| Ingo Molnar | a4234bf | 2009-11-23 10:57:59 +0100 | [diff] [blame] | 5255 |  | 
 | 5256 | 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 5257 |  | 
 | 5258 | 	perf_swevent_put_recursion_context(rctx); | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5259 | 	preempt_enable_notrace(); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5260 | } | 
 | 5261 |  | 
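/*
 * A typical in-kernel caller goes through the perf_sw_event() wrapper from
 * <linux/perf_event.h>; for instance the architecture page-fault handlers
 * do roughly the following (illustrative, not part of this file):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * i.e. one PAGE_FAULTS event, not in NMI context, with the faulting address
 * recorded as the sample address.
 */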
 | 5262 | static void perf_swevent_read(struct perf_event *event) | 
 | 5263 | { | 
 | 5264 | } | 
 | 5265 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5266 | static int perf_swevent_add(struct perf_event *event, int flags) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5267 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5268 | 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5269 | 	struct hw_perf_event *hwc = &event->hw; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5270 | 	struct hlist_head *head; | 
 | 5271 |  | 
| Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 5272 | 	if (is_sampling_event(event)) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5273 | 		hwc->last_period = hwc->sample_period; | 
 | 5274 | 		perf_swevent_set_period(event); | 
 | 5275 | 	} | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5276 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5277 | 	hwc->state = !(flags & PERF_EF_START); | 
 | 5278 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5279 | 	head = find_swevent_head(swhash, event); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5280 | 	if (WARN_ON_ONCE(!head)) | 
 | 5281 | 		return -EINVAL; | 
 | 5282 |  | 
 | 5283 | 	hlist_add_head_rcu(&event->hlist_entry, head); | 
 | 5284 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5285 | 	return 0; | 
 | 5286 | } | 
 | 5287 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5288 | static void perf_swevent_del(struct perf_event *event, int flags) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5289 | { | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5290 | 	hlist_del_rcu(&event->hlist_entry); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5291 | } | 
 | 5292 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5293 | static void perf_swevent_start(struct perf_event *event, int flags) | 
| Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 5294 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5295 | 	event->hw.state = 0; | 
| Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 5296 | } | 
 | 5297 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5298 | static void perf_swevent_stop(struct perf_event *event, int flags) | 
| Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 5299 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5300 | 	event->hw.state = PERF_HES_STOPPED; | 
| Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 5301 | } | 
 | 5302 |  | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5303 | /* Deref the hlist from the update side */ | 
 | 5304 | static inline struct swevent_hlist * | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5305 | swevent_hlist_deref(struct swevent_htable *swhash) | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5306 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5307 | 	return rcu_dereference_protected(swhash->swevent_hlist, | 
 | 5308 | 					 lockdep_is_held(&swhash->hlist_mutex)); | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5309 | } | 
 | 5310 |  | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5311 | static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) | 
 | 5312 | { | 
 | 5313 | 	struct swevent_hlist *hlist; | 
 | 5314 |  | 
 | 5315 | 	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head); | 
 | 5316 | 	kfree(hlist); | 
 | 5317 | } | 
 | 5318 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5319 | static void swevent_hlist_release(struct swevent_htable *swhash) | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5320 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5321 | 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5322 |  | 
| Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 5323 | 	if (!hlist) | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5324 | 		return; | 
 | 5325 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5326 | 	rcu_assign_pointer(swhash->swevent_hlist, NULL); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5327 | 	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); | 
 | 5328 | } | 
 | 5329 |  | 
 | 5330 | static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) | 
 | 5331 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5332 | 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5333 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5334 | 	mutex_lock(&swhash->hlist_mutex); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5335 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5336 | 	if (!--swhash->hlist_refcount) | 
 | 5337 | 		swevent_hlist_release(swhash); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5338 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5339 | 	mutex_unlock(&swhash->hlist_mutex); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5340 | } | 
 | 5341 |  | 
 | 5342 | static void swevent_hlist_put(struct perf_event *event) | 
 | 5343 | { | 
 | 5344 | 	int cpu; | 
 | 5345 |  | 
 | 5346 | 	if (event->cpu != -1) { | 
 | 5347 | 		swevent_hlist_put_cpu(event, event->cpu); | 
 | 5348 | 		return; | 
 | 5349 | 	} | 
 | 5350 |  | 
 | 5351 | 	for_each_possible_cpu(cpu) | 
 | 5352 | 		swevent_hlist_put_cpu(event, cpu); | 
 | 5353 | } | 
 | 5354 |  | 
 | 5355 | static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) | 
 | 5356 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5357 | 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5358 | 	int err = 0; | 
 | 5359 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5360 | 	mutex_lock(&swhash->hlist_mutex); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5361 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5362 | 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5363 | 		struct swevent_hlist *hlist; | 
 | 5364 |  | 
 | 5365 | 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); | 
 | 5366 | 		if (!hlist) { | 
 | 5367 | 			err = -ENOMEM; | 
 | 5368 | 			goto exit; | 
 | 5369 | 		} | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5370 | 		rcu_assign_pointer(swhash->swevent_hlist, hlist); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5371 | 	} | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5372 | 	swhash->hlist_refcount++; | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 5373 | exit: | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 5374 | 	mutex_unlock(&swhash->hlist_mutex); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5375 |  | 
 | 5376 | 	return err; | 
 | 5377 | } | 
 | 5378 |  | 
 | 5379 | static int swevent_hlist_get(struct perf_event *event) | 
 | 5380 | { | 
 | 5381 | 	int err; | 
 | 5382 | 	int cpu, failed_cpu; | 
 | 5383 |  | 
 | 5384 | 	if (event->cpu != -1) | 
 | 5385 | 		return swevent_hlist_get_cpu(event, event->cpu); | 
 | 5386 |  | 
 | 5387 | 	get_online_cpus(); | 
 | 5388 | 	for_each_possible_cpu(cpu) { | 
 | 5389 | 		err = swevent_hlist_get_cpu(event, cpu); | 
 | 5390 | 		if (err) { | 
 | 5391 | 			failed_cpu = cpu; | 
 | 5392 | 			goto fail; | 
 | 5393 | 		} | 
 | 5394 | 	} | 
 | 5395 | 	put_online_cpus(); | 
 | 5396 |  | 
 | 5397 | 	return 0; | 
| Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 5398 | fail: | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5399 | 	for_each_possible_cpu(cpu) { | 
 | 5400 | 		if (cpu == failed_cpu) | 
 | 5401 | 			break; | 
 | 5402 | 		swevent_hlist_put_cpu(event, cpu); | 
 | 5403 | 	} | 
 | 5404 |  | 
 | 5405 | 	put_online_cpus(); | 
 | 5406 | 	return err; | 
 | 5407 | } | 
 | 5408 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5409 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 
| Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 5410 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5411 | static void sw_perf_event_destroy(struct perf_event *event) | 
 | 5412 | { | 
 | 5413 | 	u64 event_id = event->attr.config; | 
 | 5414 |  | 
 | 5415 | 	WARN_ON(event->parent); | 
 | 5416 |  | 
| Peter Zijlstra | 7e54a5a | 2010-10-14 22:32:45 +0200 | [diff] [blame] | 5417 | 	jump_label_dec(&perf_swevent_enabled[event_id]); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5418 | 	swevent_hlist_put(event); | 
 | 5419 | } | 
 | 5420 |  | 
 | 5421 | static int perf_swevent_init(struct perf_event *event) | 
 | 5422 | { | 
 | 5423 | 	int event_id = event->attr.config; | 
 | 5424 |  | 
 | 5425 | 	if (event->attr.type != PERF_TYPE_SOFTWARE) | 
 | 5426 | 		return -ENOENT; | 
 | 5427 |  | 
 | 5428 | 	switch (event_id) { | 
 | 5429 | 	case PERF_COUNT_SW_CPU_CLOCK: | 
 | 5430 | 	case PERF_COUNT_SW_TASK_CLOCK: | 
 | 5431 | 		return -ENOENT; | 
 | 5432 |  | 
 | 5433 | 	default: | 
 | 5434 | 		break; | 
 | 5435 | 	} | 
 | 5436 |  | 
| Dan Carpenter | ce67783 | 2010-10-24 21:50:42 +0200 | [diff] [blame] | 5437 | 	if (event_id >= PERF_COUNT_SW_MAX) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5438 | 		return -ENOENT; | 
 | 5439 |  | 
 | 5440 | 	if (!event->parent) { | 
 | 5441 | 		int err; | 
 | 5442 |  | 
 | 5443 | 		err = swevent_hlist_get(event); | 
 | 5444 | 		if (err) | 
 | 5445 | 			return err; | 
 | 5446 |  | 
| Peter Zijlstra | 7e54a5a | 2010-10-14 22:32:45 +0200 | [diff] [blame] | 5447 | 		jump_label_inc(&perf_swevent_enabled[event_id]); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5448 | 		event->destroy = sw_perf_event_destroy; | 
 | 5449 | 	} | 
 | 5450 |  | 
 | 5451 | 	return 0; | 
 | 5452 | } | 
 | 5453 |  | 
 | 5454 | static struct pmu perf_swevent = { | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 5455 | 	.task_ctx_nr	= perf_sw_context, | 
 | 5456 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5457 | 	.event_init	= perf_swevent_init, | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5458 | 	.add		= perf_swevent_add, | 
 | 5459 | 	.del		= perf_swevent_del, | 
 | 5460 | 	.start		= perf_swevent_start, | 
 | 5461 | 	.stop		= perf_swevent_stop, | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5462 | 	.read		= perf_swevent_read, | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5463 | }; | 
| Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 5464 |  | 
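/*
 * For context, a minimal userspace consumer of this software PMU looks
 * roughly as follows (a hedged sketch, clearly not part of this file): it
 * counts context switches for the calling thread via perf_event_open().
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;		/* handled by perf_swevent above */
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;

	/* pid = 0 (self), cpu = -1 (any cpu), group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	read(fd, &count, sizeof(count));
	printf("context switches: %llu\n", count);
	close(fd);
	return 0;
}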
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5465 | #ifdef CONFIG_EVENT_TRACING | 
 | 5466 |  | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5467 | static int perf_tp_filter_match(struct perf_event *event, | 
| Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 5468 | 				struct perf_sample_data *data) | 
 | 5469 | { | 
 | 5470 | 	void *record = data->raw->data; | 
 | 5471 |  | 
 | 5472 | 	if (likely(!event->filter) || filter_match_preds(event->filter, record)) | 
 | 5473 | 		return 1; | 
 | 5474 | 	return 0; | 
 | 5475 | } | 
 | 5476 |  | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5477 | static int perf_tp_event_match(struct perf_event *event, | 
 | 5478 | 				struct perf_sample_data *data, | 
 | 5479 | 				struct pt_regs *regs) | 
 | 5480 | { | 
| Frederic Weisbecker | a0f7d0f | 2011-03-07 21:27:09 +0100 | [diff] [blame] | 5481 | 	if (event->hw.state & PERF_HES_STOPPED) | 
 | 5482 | 		return 0; | 
| Peter Zijlstra | 580d607 | 2010-05-20 20:54:31 +0200 | [diff] [blame] | 5483 | 	/* | 
 | 5484 | 	 * All tracepoints are from kernel-space. | 
 | 5485 | 	 */ | 
 | 5486 | 	if (event->attr.exclude_kernel) | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5487 | 		return 0; | 
 | 5488 |  | 
 | 5489 | 	if (!perf_tp_filter_match(event, data)) | 
 | 5490 | 		return 0; | 
 | 5491 |  | 
 | 5492 | 	return 1; | 
 | 5493 | } | 
 | 5494 |  | 
 | 5495 | void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | 
| Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 5496 | 		   struct pt_regs *regs, struct hlist_head *head, int rctx) | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5497 | { | 
 | 5498 | 	struct perf_sample_data data; | 
 | 5499 | 	struct perf_event *event; | 
 | 5500 | 	struct hlist_node *node; | 
 | 5501 |  | 
 | 5502 | 	struct perf_raw_record raw = { | 
 | 5503 | 		.size = entry_size, | 
 | 5504 | 		.data = record, | 
 | 5505 | 	}; | 
 | 5506 |  | 
 | 5507 | 	perf_sample_data_init(&data, addr); | 
 | 5508 | 	data.raw = &raw; | 
 | 5509 |  | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5510 | 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 
 | 5511 | 		if (perf_tp_event_match(event, &data, regs)) | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5512 | 			perf_swevent_event(event, count, 1, &data, regs); | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5513 | 	} | 
| Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 5514 |  | 
 | 5515 | 	perf_swevent_put_recursion_context(rctx); | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5516 | } | 
 | 5517 | EXPORT_SYMBOL_GPL(perf_tp_event); | 
 | 5518 |  | 
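/*
 * The callers of perf_tp_event() live in the tracepoint glue generated by
 * TRACE_EVENT(); in rough outline (a condensed sketch, the buffer handling
 * belongs to the tracing code, not to this file) each generated
 * perf_trace_<event>() probe does:
 *
 *	rctx = perf_swevent_get_recursion_context();
 *	if (rctx < 0)
 *		return;
 *	... copy the tracepoint fields into a per-cpu raw record ...
 *	perf_tp_event(addr, count, record, size, regs, head, rctx);
 *
 * Note that perf_tp_event() drops the recursion context itself, as seen
 * above.
 */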
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5519 | static void tp_perf_event_destroy(struct perf_event *event) | 
 | 5520 | { | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5521 | 	perf_trace_destroy(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5522 | } | 
 | 5523 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5524 | static int perf_tp_event_init(struct perf_event *event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5525 | { | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 5526 | 	int err; | 
 | 5527 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5528 | 	if (event->attr.type != PERF_TYPE_TRACEPOINT) | 
 | 5529 | 		return -ENOENT; | 
 | 5530 |  | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 5531 | 	err = perf_trace_init(event); | 
 | 5532 | 	if (err) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5533 | 		return err; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5534 |  | 
 | 5535 | 	event->destroy = tp_perf_event_destroy; | 
 | 5536 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5537 | 	return 0; | 
 | 5538 | } | 
 | 5539 |  | 
 | 5540 | static struct pmu perf_tracepoint = { | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 5541 | 	.task_ctx_nr	= perf_sw_context, | 
 | 5542 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5543 | 	.event_init	= perf_tp_event_init, | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5544 | 	.add		= perf_trace_add, | 
 | 5545 | 	.del		= perf_trace_del, | 
 | 5546 | 	.start		= perf_swevent_start, | 
 | 5547 | 	.stop		= perf_swevent_stop, | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5548 | 	.read		= perf_swevent_read, | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5549 | }; | 
 | 5550 |  | 
 | 5551 | static inline void perf_tp_register(void) | 
 | 5552 | { | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 5553 | 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5554 | } | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5555 |  | 
 | 5556 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 
 | 5557 | { | 
 | 5558 | 	char *filter_str; | 
 | 5559 | 	int ret; | 
 | 5560 |  | 
 | 5561 | 	if (event->attr.type != PERF_TYPE_TRACEPOINT) | 
 | 5562 | 		return -EINVAL; | 
 | 5563 |  | 
 | 5564 | 	filter_str = strndup_user(arg, PAGE_SIZE); | 
 | 5565 | 	if (IS_ERR(filter_str)) | 
 | 5566 | 		return PTR_ERR(filter_str); | 
 | 5567 |  | 
 | 5568 | 	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); | 
 | 5569 |  | 
 | 5570 | 	kfree(filter_str); | 
 | 5571 | 	return ret; | 
 | 5572 | } | 
 | 5573 |  | 
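/*
 * Userspace reaches this through the PERF_EVENT_IOC_SET_FILTER ioctl on a
 * tracepoint event fd; a minimal illustrative example (userspace code, and
 * the filter string is just an example):
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
 *		perror("PERF_EVENT_IOC_SET_FILTER");
 */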
 | 5574 | static void perf_event_free_filter(struct perf_event *event) | 
 | 5575 | { | 
 | 5576 | 	ftrace_profile_free_filter(event); | 
 | 5577 | } | 
 | 5578 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5579 | #else | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5580 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5581 | static inline void perf_tp_register(void) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5582 | { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5583 | } | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5584 |  | 
 | 5585 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 
 | 5586 | { | 
 | 5587 | 	return -ENOENT; | 
 | 5588 | } | 
 | 5589 |  | 
 | 5590 | static void perf_event_free_filter(struct perf_event *event) | 
 | 5591 | { | 
 | 5592 | } | 
 | 5593 |  | 
| Li Zefan | 07b139c | 2009-12-21 14:27:35 +0800 | [diff] [blame] | 5594 | #endif /* CONFIG_EVENT_TRACING */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5595 |  | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 5596 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5597 | void perf_bp_event(struct perf_event *bp, void *data) | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 5598 | { | 
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5599 | 	struct perf_sample_data sample; | 
 | 5600 | 	struct pt_regs *regs = data; | 
 | 5601 |  | 
| Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 5602 | 	perf_sample_data_init(&sample, bp->attr.bp_addr); | 
| Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 5603 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5604 | 	if (!bp->hw.state && !perf_exclude_event(bp, regs)) | 
 | 5605 | 		perf_swevent_event(bp, 1, 1, &sample, regs); | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 5606 | } | 
 | 5607 | #endif | 
 | 5608 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5609 | /* | 
 | 5610 |  * hrtimer based swevent callback | 
 | 5611 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5612 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5613 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5614 | { | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5615 | 	enum hrtimer_restart ret = HRTIMER_RESTART; | 
 | 5616 | 	struct perf_sample_data data; | 
 | 5617 | 	struct pt_regs *regs; | 
 | 5618 | 	struct perf_event *event; | 
 | 5619 | 	u64 period; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5620 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5621 | 	event = container_of(hrtimer, struct perf_event, hw.hrtimer); | 
| Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 5622 |  | 
 | 5623 | 	if (event->state != PERF_EVENT_STATE_ACTIVE) | 
 | 5624 | 		return HRTIMER_NORESTART; | 
 | 5625 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5626 | 	event->pmu->read(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5627 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5628 | 	perf_sample_data_init(&data, 0); | 
 | 5629 | 	data.period = event->hw.last_period; | 
 | 5630 | 	regs = get_irq_regs(); | 
 | 5631 |  | 
 | 5632 | 	if (regs && !perf_exclude_event(event, regs)) { | 
 | 5633 | 		if (!(event->attr.exclude_idle && current->pid == 0)) | 
 | 5634 | 			if (perf_event_overflow(event, 0, &data, regs)) | 
 | 5635 | 				ret = HRTIMER_NORESTART; | 
 | 5636 | 	} | 
 | 5637 |  | 
 | 5638 | 	period = max_t(u64, 10000, event->hw.sample_period); | 
 | 5639 | 	hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | 
 | 5640 |  | 
 | 5641 | 	return ret; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5642 | } | 
 | 5643 |  | 
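/*
 * Note the clamp above: both this path and perf_swevent_start_hrtimer()
 * below use max_t(u64, 10000, ...), so e.g. a requested sample_period of
 * 1000 ns is still re-armed at 10000 ns; the timer never fires more than
 * once every 10 microseconds.
 */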
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5644 | static void perf_swevent_start_hrtimer(struct perf_event *event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5645 | { | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5646 | 	struct hw_perf_event *hwc = &event->hw; | 
| Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 5647 | 	s64 period; | 
 | 5648 |  | 
 | 5649 | 	if (!is_sampling_event(event)) | 
 | 5650 | 		return; | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5651 |  | 
| Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 5652 | 	period = local64_read(&hwc->period_left); | 
 | 5653 | 	if (period) { | 
 | 5654 | 		if (period < 0) | 
 | 5655 | 			period = 10000; | 
| Peter Zijlstra | fa407f3 | 2010-06-24 12:35:12 +0200 | [diff] [blame] | 5656 |  | 
| Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 5657 | 		local64_set(&hwc->period_left, 0); | 
 | 5658 | 	} else { | 
 | 5659 | 		period = max_t(u64, 10000, hwc->sample_period); | 
 | 5660 | 	} | 
 | 5661 | 	__hrtimer_start_range_ns(&hwc->hrtimer, | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5662 | 				ns_to_ktime(period), 0, | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 5663 | 				HRTIMER_MODE_REL_PINNED, 0); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5664 | } | 
 | 5665 |  | 
 | 5666 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | 
 | 5667 | { | 
 | 5668 | 	struct hw_perf_event *hwc = &event->hw; | 
 | 5669 |  | 
| Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 5670 | 	if (is_sampling_event(event)) { | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5671 | 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | 
| Peter Zijlstra | fa407f3 | 2010-06-24 12:35:12 +0200 | [diff] [blame] | 5672 | 		local64_set(&hwc->period_left, ktime_to_ns(remaining)); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5673 |  | 
 | 5674 | 		hrtimer_cancel(&hwc->hrtimer); | 
 | 5675 | 	} | 
 | 5676 | } | 
 | 5677 |  | 
| Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 5678 | static void perf_swevent_init_hrtimer(struct perf_event *event) | 
 | 5679 | { | 
 | 5680 | 	struct hw_perf_event *hwc = &event->hw; | 
 | 5681 |  | 
 | 5682 | 	if (!is_sampling_event(event)) | 
 | 5683 | 		return; | 
 | 5684 |  | 
 | 5685 | 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 
 | 5686 | 	hwc->hrtimer.function = perf_swevent_hrtimer; | 
 | 5687 |  | 
 | 5688 | 	/* | 
 | 5689 | 	 * Since hrtimers have a fixed rate, we can do a static freq->period | 
 | 5690 | 	 * mapping and avoid the whole period adjust feedback stuff. | 
 | 5691 | 	 */ | 
 | 5692 | 	if (event->attr.freq) { | 
 | 5693 | 		long freq = event->attr.sample_freq; | 
 | 5694 |  | 
 | 5695 | 		event->attr.sample_period = NSEC_PER_SEC / freq; | 
 | 5696 | 		hwc->sample_period = event->attr.sample_period; | 
 | 5697 | 		local64_set(&hwc->period_left, hwc->sample_period); | 
 | 5698 | 		event->attr.freq = 0; | 
 | 5699 | 	} | 
 | 5700 | } | 
 | 5701 |  | 
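/*
 * Worked example of the static freq->period mapping above (the frequency is
 * an illustrative value): attr.freq = 1 with attr.sample_freq = 4000 yields
 * sample_period = NSEC_PER_SEC / 4000 = 250000 ns, i.e. the hrtimer fires
 * every 250us; attr.freq is then cleared so the generic frequency-adjust
 * code leaves this period alone.
 */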
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5702 | /* | 
 | 5703 |  * Software event: cpu wall time clock | 
 | 5704 |  */ | 
 | 5705 |  | 
 | 5706 | static void cpu_clock_event_update(struct perf_event *event) | 
 | 5707 | { | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5708 | 	s64 prev; | 
 | 5709 | 	u64 now; | 
 | 5710 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5711 | 	now = local_clock(); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5712 | 	prev = local64_xchg(&event->hw.prev_count, now); | 
 | 5713 | 	local64_add(now - prev, &event->count); | 
 | 5714 | } | 
 | 5715 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5716 | static void cpu_clock_event_start(struct perf_event *event, int flags) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5717 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5718 | 	local64_set(&event->hw.prev_count, local_clock()); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5719 | 	perf_swevent_start_hrtimer(event); | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5720 | } | 
 | 5721 |  | 
 | 5722 | static void cpu_clock_event_stop(struct perf_event *event, int flags) | 
 | 5723 | { | 
 | 5724 | 	perf_swevent_cancel_hrtimer(event); | 
 | 5725 | 	cpu_clock_event_update(event); | 
 | 5726 | } | 
 | 5727 |  | 
 | 5728 | static int cpu_clock_event_add(struct perf_event *event, int flags) | 
 | 5729 | { | 
 | 5730 | 	if (flags & PERF_EF_START) | 
 | 5731 | 		cpu_clock_event_start(event, flags); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5732 |  | 
 | 5733 | 	return 0; | 
 | 5734 | } | 
 | 5735 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5736 | static void cpu_clock_event_del(struct perf_event *event, int flags) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5737 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5738 | 	cpu_clock_event_stop(event, flags); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5739 | } | 
 | 5740 |  | 
 | 5741 | static void cpu_clock_event_read(struct perf_event *event) | 
 | 5742 | { | 
 | 5743 | 	cpu_clock_event_update(event); | 
 | 5744 | } | 
 | 5745 |  | 
 | 5746 | static int cpu_clock_event_init(struct perf_event *event) | 
 | 5747 | { | 
 | 5748 | 	if (event->attr.type != PERF_TYPE_SOFTWARE) | 
 | 5749 | 		return -ENOENT; | 
 | 5750 |  | 
 | 5751 | 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) | 
 | 5752 | 		return -ENOENT; | 
 | 5753 |  | 
| Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 5754 | 	perf_swevent_init_hrtimer(event); | 
 | 5755 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5756 | 	return 0; | 
 | 5757 | } | 
 | 5758 |  | 
 | 5759 | static struct pmu perf_cpu_clock = { | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 5760 | 	.task_ctx_nr	= perf_sw_context, | 
 | 5761 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5762 | 	.event_init	= cpu_clock_event_init, | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5763 | 	.add		= cpu_clock_event_add, | 
 | 5764 | 	.del		= cpu_clock_event_del, | 
 | 5765 | 	.start		= cpu_clock_event_start, | 
 | 5766 | 	.stop		= cpu_clock_event_stop, | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5767 | 	.read		= cpu_clock_event_read, | 
 | 5768 | }; | 
 | 5769 |  | 
 | 5770 | /* | 
 | 5771 |  * Software event: task time clock | 
 | 5772 |  */ | 
 | 5773 |  | 
 | 5774 | static void task_clock_event_update(struct perf_event *event, u64 now) | 
 | 5775 | { | 
 | 5776 | 	u64 prev; | 
 | 5777 | 	s64 delta; | 
 | 5778 |  | 
 | 5779 | 	prev = local64_xchg(&event->hw.prev_count, now); | 
 | 5780 | 	delta = now - prev; | 
 | 5781 | 	local64_add(delta, &event->count); | 
 | 5782 | } | 
 | 5783 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5784 | static void task_clock_event_start(struct perf_event *event, int flags) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5785 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5786 | 	local64_set(&event->hw.prev_count, event->ctx->time); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5787 | 	perf_swevent_start_hrtimer(event); | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5788 | } | 
 | 5789 |  | 
 | 5790 | static void task_clock_event_stop(struct perf_event *event, int flags) | 
 | 5791 | { | 
 | 5792 | 	perf_swevent_cancel_hrtimer(event); | 
 | 5793 | 	task_clock_event_update(event, event->ctx->time); | 
 | 5794 | } | 
 | 5795 |  | 
 | 5796 | static int task_clock_event_add(struct perf_event *event, int flags) | 
 | 5797 | { | 
 | 5798 | 	if (flags & PERF_EF_START) | 
 | 5799 | 		task_clock_event_start(event, flags); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5800 |  | 
 | 5801 | 	return 0; | 
 | 5802 | } | 
 | 5803 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5804 | static void task_clock_event_del(struct perf_event *event, int flags) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5805 | { | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5806 | 	task_clock_event_stop(event, PERF_EF_UPDATE); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5807 | } | 
 | 5808 |  | 
 | 5809 | static void task_clock_event_read(struct perf_event *event) | 
 | 5810 | { | 
| Peter Zijlstra | 768a06e | 2011-02-22 16:52:24 +0100 | [diff] [blame] | 5811 | 	u64 now = perf_clock(); | 
 | 5812 | 	u64 delta = now - event->ctx->timestamp; | 
 | 5813 | 	u64 time = event->ctx->time + delta; | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5814 |  | 
 | 5815 | 	task_clock_event_update(event, time); | 
 | 5816 | } | 
 | 5817 |  | 
 | 5818 | static int task_clock_event_init(struct perf_event *event) | 
 | 5819 | { | 
 | 5820 | 	if (event->attr.type != PERF_TYPE_SOFTWARE) | 
 | 5821 | 		return -ENOENT; | 
 | 5822 |  | 
 | 5823 | 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) | 
 | 5824 | 		return -ENOENT; | 
 | 5825 |  | 
| Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 5826 | 	perf_swevent_init_hrtimer(event); | 
 | 5827 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5828 | 	return 0; | 
 | 5829 | } | 
 | 5830 |  | 
 | 5831 | static struct pmu perf_task_clock = { | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 5832 | 	.task_ctx_nr	= perf_sw_context, | 
 | 5833 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5834 | 	.event_init	= task_clock_event_init, | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5835 | 	.add		= task_clock_event_add, | 
 | 5836 | 	.del		= task_clock_event_del, | 
 | 5837 | 	.start		= task_clock_event_start, | 
 | 5838 | 	.stop		= task_clock_event_stop, | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5839 | 	.read		= task_clock_event_read, | 
 | 5840 | }; | 
 | 5841 |  | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 5842 | static void perf_pmu_nop_void(struct pmu *pmu) | 
 | 5843 | { | 
 | 5844 | } | 
 | 5845 |  | 
 | 5846 | static int perf_pmu_nop_int(struct pmu *pmu) | 
 | 5847 | { | 
 | 5848 | 	return 0; | 
 | 5849 | } | 
 | 5850 |  | 
 | 5851 | static void perf_pmu_start_txn(struct pmu *pmu) | 
 | 5852 | { | 
 | 5853 | 	perf_pmu_disable(pmu); | 
 | 5854 | } | 
 | 5855 |  | 
 | 5856 | static int perf_pmu_commit_txn(struct pmu *pmu) | 
 | 5857 | { | 
 | 5858 | 	perf_pmu_enable(pmu); | 
 | 5859 | 	return 0; | 
 | 5860 | } | 
 | 5861 |  | 
 | 5862 | static void perf_pmu_cancel_txn(struct pmu *pmu) | 
 | 5863 | { | 
 | 5864 | 	perf_pmu_enable(pmu); | 
 | 5865 | } | 
 | 5866 |  | 
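/*
 * These stubs (or a pmu-provided start_txn/commit_txn/cancel_txn) back the
 * group scheduling path earlier in this file; condensed, group_sched_in()
 * uses the transaction interface roughly like this (a sketch, not a
 * verbatim copy):
 *
 *	pmu->start_txn(pmu);
 *
 *	if (event_sched_in(group_event, cpuctx, ctx))
 *		goto group_error;
 *
 *	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
 *		if (event_sched_in(event, cpuctx, ctx))
 *			goto group_error;
 *	}
 *
 *	if (!pmu->commit_txn(pmu))
 *		return 0;
 *
 * group_error:
 *	... unwind the partially scheduled group ...
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 */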
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5867 | /* | 
 | 5868 |  * Ensures all contexts with the same task_ctx_nr have the same | 
 | 5869 |  * pmu_cpu_context too. | 
 | 5870 |  */ | 
 | 5871 | static void *find_pmu_context(int ctxn) | 
 | 5872 | { | 
 | 5873 | 	struct pmu *pmu; | 
 | 5874 |  | 
 | 5875 | 	if (ctxn < 0) | 
 | 5876 | 		return NULL; | 
 | 5877 |  | 
 | 5878 | 	list_for_each_entry(pmu, &pmus, entry) { | 
 | 5879 | 		if (pmu->task_ctx_nr == ctxn) | 
 | 5880 | 			return pmu->pmu_cpu_context; | 
 | 5881 | 	} | 
 | 5882 |  | 
 | 5883 | 	return NULL; | 
 | 5884 | } | 
 | 5885 |  | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 5886 | static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5887 | { | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 5888 | 	int cpu; | 
 | 5889 |  | 
 | 5890 | 	for_each_possible_cpu(cpu) { | 
 | 5891 | 		struct perf_cpu_context *cpuctx; | 
 | 5892 |  | 
 | 5893 | 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); | 
 | 5894 |  | 
 | 5895 | 		if (cpuctx->active_pmu == old_pmu) | 
 | 5896 | 			cpuctx->active_pmu = pmu; | 
 | 5897 | 	} | 
 | 5898 | } | 
 | 5899 |  | 
 | 5900 | static void free_pmu_context(struct pmu *pmu) | 
 | 5901 | { | 
 | 5902 | 	struct pmu *i; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5903 |  | 
 | 5904 | 	mutex_lock(&pmus_lock); | 
 | 5905 | 	/* | 
 | 5906 | 	 * Like a real lame refcount. | 
 | 5907 | 	 */ | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 5908 | 	list_for_each_entry(i, &pmus, entry) { | 
 | 5909 | 		if (i->pmu_cpu_context == pmu->pmu_cpu_context) { | 
 | 5910 | 			update_pmu_context(i, pmu); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5911 | 			goto out; | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 5912 | 		} | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5913 | 	} | 
 | 5914 |  | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 5915 | 	free_percpu(pmu->pmu_cpu_context); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5916 | out: | 
 | 5917 | 	mutex_unlock(&pmus_lock); | 
 | 5918 | } | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 5919 | static struct idr pmu_idr; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 5920 |  | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 5921 | static ssize_t | 
 | 5922 | type_show(struct device *dev, struct device_attribute *attr, char *page) | 
 | 5923 | { | 
 | 5924 | 	struct pmu *pmu = dev_get_drvdata(dev); | 
 | 5925 |  | 
 | 5926 | 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); | 
 | 5927 | } | 
 | 5928 |  | 
 | 5929 | static struct device_attribute pmu_dev_attrs[] = { | 
 | 5930 |        __ATTR_RO(type), | 
 | 5931 |        __ATTR_NULL, | 
 | 5932 | }; | 
 | 5933 |  | 
 | 5934 | static int pmu_bus_running; | 
 | 5935 | static struct bus_type pmu_bus = { | 
 | 5936 | 	.name		= "event_source", | 
 | 5937 | 	.dev_attrs	= pmu_dev_attrs, | 
 | 5938 | }; | 
 | 5939 |  | 
 | 5940 | static void pmu_dev_release(struct device *dev) | 
 | 5941 | { | 
 | 5942 | 	kfree(dev); | 
 | 5943 | } | 
 | 5944 |  | 
 | 5945 | static int pmu_dev_alloc(struct pmu *pmu) | 
 | 5946 | { | 
 | 5947 | 	int ret = -ENOMEM; | 
 | 5948 |  | 
 | 5949 | 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); | 
 | 5950 | 	if (!pmu->dev) | 
 | 5951 | 		goto out; | 
 | 5952 |  | 
 | 5953 | 	device_initialize(pmu->dev); | 
 | 5954 | 	ret = dev_set_name(pmu->dev, "%s", pmu->name); | 
 | 5955 | 	if (ret) | 
 | 5956 | 		goto free_dev; | 
 | 5957 |  | 
 | 5958 | 	dev_set_drvdata(pmu->dev, pmu); | 
 | 5959 | 	pmu->dev->bus = &pmu_bus; | 
 | 5960 | 	pmu->dev->release = pmu_dev_release; | 
 | 5961 | 	ret = device_add(pmu->dev); | 
 | 5962 | 	if (ret) | 
 | 5963 | 		goto free_dev; | 
 | 5964 |  | 
 | 5965 | out: | 
 | 5966 | 	return ret; | 
 | 5967 |  | 
 | 5968 | free_dev: | 
 | 5969 | 	put_device(pmu->dev); | 
 | 5970 | 	goto out; | 
 | 5971 | } | 
 | 5972 |  | 
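/*
 * The device registered here appears as /sys/bus/event_source/devices/<name>/
 * and the "type" attribute above is how userspace discovers a dynamically
 * allocated PMU type.  An illustrative userspace reader (the helper name is
 * made up for the example):
 */
#include <stdio.h>

/* returns the PMU's event type, or -1 on error */
static int read_pmu_type(const char *name)
{
	char path[256];
	FILE *f;
	int type = -1;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;
}
/* The value read this way is what goes into perf_event_attr.type. */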
| Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 5973 | static struct lock_class_key cpuctx_mutex; | 
 | 5974 |  | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 5975 | int perf_pmu_register(struct pmu *pmu, char *name, int type) | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5976 | { | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 5977 | 	int cpu, ret; | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 5978 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 5979 | 	mutex_lock(&pmus_lock); | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 5980 | 	ret = -ENOMEM; | 
 | 5981 | 	pmu->pmu_disable_count = alloc_percpu(int); | 
 | 5982 | 	if (!pmu->pmu_disable_count) | 
 | 5983 | 		goto unlock; | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 5984 |  | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 5985 | 	pmu->type = -1; | 
 | 5986 | 	if (!name) | 
 | 5987 | 		goto skip_type; | 
 | 5988 | 	pmu->name = name; | 
 | 5989 |  | 
 | 5990 | 	if (type < 0) { | 
 | 5991 | 		int err = idr_pre_get(&pmu_idr, GFP_KERNEL); | 
 | 5992 | 		if (!err) | 
 | 5993 | 			goto free_pdc; | 
 | 5994 |  | 
 | 5995 | 		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type); | 
 | 5996 | 		if (err) { | 
 | 5997 | 			ret = err; | 
 | 5998 | 			goto free_pdc; | 
 | 5999 | 		} | 
 | 6000 | 	} | 
 | 6001 | 	pmu->type = type; | 
 | 6002 |  | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 6003 | 	if (pmu_bus_running) { | 
 | 6004 | 		ret = pmu_dev_alloc(pmu); | 
 | 6005 | 		if (ret) | 
 | 6006 | 			goto free_idr; | 
 | 6007 | 	} | 
 | 6008 |  | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 6009 | skip_type: | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6010 | 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); | 
 | 6011 | 	if (pmu->pmu_cpu_context) | 
 | 6012 | 		goto got_cpu_context; | 
 | 6013 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 6014 | 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); | 
 | 6015 | 	if (!pmu->pmu_cpu_context) | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 6016 | 		goto free_dev; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 6017 |  | 
 | 6018 | 	for_each_possible_cpu(cpu) { | 
 | 6019 | 		struct perf_cpu_context *cpuctx; | 
 | 6020 |  | 
 | 6021 | 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 6022 | 		__perf_event_init_context(&cpuctx->ctx); | 
| Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 6023 | 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6024 | 		cpuctx->ctx.type = cpu_context; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 6025 | 		cpuctx->ctx.pmu = pmu; | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 6026 | 		cpuctx->jiffies_interval = 1; | 
 | 6027 | 		INIT_LIST_HEAD(&cpuctx->rotation_list); | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 6028 | 		cpuctx->active_pmu = pmu; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 6029 | 	} | 
 | 6030 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6031 | got_cpu_context: | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 6032 | 	if (!pmu->start_txn) { | 
 | 6033 | 		if (pmu->pmu_enable) { | 
 | 6034 | 			/* | 
 | 6035 | 			 * If we have pmu_enable/pmu_disable calls, install | 
 | 6036 | 			 * transaction stubs that use that to try and batch | 
 | 6037 | 			 * hardware accesses. | 
 | 6038 | 			 */ | 
 | 6039 | 			pmu->start_txn  = perf_pmu_start_txn; | 
 | 6040 | 			pmu->commit_txn = perf_pmu_commit_txn; | 
 | 6041 | 			pmu->cancel_txn = perf_pmu_cancel_txn; | 
 | 6042 | 		} else { | 
 | 6043 | 			pmu->start_txn  = perf_pmu_nop_void; | 
 | 6044 | 			pmu->commit_txn = perf_pmu_nop_int; | 
 | 6045 | 			pmu->cancel_txn = perf_pmu_nop_void; | 
 | 6046 | 		} | 
 | 6047 | 	} | 
 | 6048 |  | 
 | 6049 | 	if (!pmu->pmu_enable) { | 
 | 6050 | 		pmu->pmu_enable  = perf_pmu_nop_void; | 
 | 6051 | 		pmu->pmu_disable = perf_pmu_nop_void; | 
 | 6052 | 	} | 
 | 6053 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6054 | 	list_add_rcu(&pmu->entry, &pmus); | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 6055 | 	ret = 0; | 
 | 6056 | unlock: | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6057 | 	mutex_unlock(&pmus_lock); | 
 | 6058 |  | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 6059 | 	return ret; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 6060 |  | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 6061 | free_dev: | 
 | 6062 | 	device_del(pmu->dev); | 
 | 6063 | 	put_device(pmu->dev); | 
 | 6064 |  | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 6065 | free_idr: | 
 | 6066 | 	if (pmu->type >= PERF_TYPE_MAX) | 
 | 6067 | 		idr_remove(&pmu_idr, pmu->type); | 
 | 6068 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 6069 | free_pdc: | 
 | 6070 | 	free_percpu(pmu->pmu_disable_count); | 
 | 6071 | 	goto unlock; | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6072 | } | 
 | 6073 |  | 
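/*
 * A new PMU follows the same pattern as the software PMUs defined above; a
 * hedged sketch of registering one with a dynamically allocated type (all
 * my_pmu_* names are illustrative):
 */
static struct pmu my_pmu = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.start		= my_pmu_start,
	.stop		= my_pmu_stop,
	.read		= my_pmu_read,
};

static int __init my_pmu_init(void)
{
	/* type < 0 asks perf_pmu_register() to allocate a number from pmu_idr */
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}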
 | 6074 | void perf_pmu_unregister(struct pmu *pmu) | 
 | 6075 | { | 
 | 6076 | 	mutex_lock(&pmus_lock); | 
 | 6077 | 	list_del_rcu(&pmu->entry); | 
 | 6078 | 	mutex_unlock(&pmus_lock); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6079 |  | 
 | 6080 | 	/* | 
| Peter Zijlstra | cde8e88 | 2010-09-13 11:06:55 +0200 | [diff] [blame] | 6081 | 	 * We dereference the pmu list under both SRCU and regular RCU, so | 
 | 6082 | 	 * synchronize against both of those. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6083 | 	 */ | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6084 | 	synchronize_srcu(&pmus_srcu); | 
| Peter Zijlstra | cde8e88 | 2010-09-13 11:06:55 +0200 | [diff] [blame] | 6085 | 	synchronize_rcu(); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6086 |  | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 6087 | 	free_percpu(pmu->pmu_disable_count); | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 6088 | 	if (pmu->type >= PERF_TYPE_MAX) | 
 | 6089 | 		idr_remove(&pmu_idr, pmu->type); | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 6090 | 	device_del(pmu->dev); | 
 | 6091 | 	put_device(pmu->dev); | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 6092 | 	free_pmu_context(pmu); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6093 | } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6094 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6095 | struct pmu *perf_init_event(struct perf_event *event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6096 | { | 
| Peter Zijlstra | 51b0fe3 | 2010-06-11 13:35:57 +0200 | [diff] [blame] | 6097 | 	struct pmu *pmu = NULL; | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6098 | 	int idx; | 
| Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 6099 | 	int ret; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 6100 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6101 | 	idx = srcu_read_lock(&pmus_srcu); | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 6102 |  | 
 | 6103 | 	rcu_read_lock(); | 
 | 6104 | 	pmu = idr_find(&pmu_idr, event->attr.type); | 
 | 6105 | 	rcu_read_unlock(); | 
| Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 6106 | 	if (pmu) { | 
 | 6107 | 		ret = pmu->event_init(event); | 
 | 6108 | 		if (ret) | 
 | 6109 | 			pmu = ERR_PTR(ret); | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 6110 | 		goto unlock; | 
| Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 6111 | 	} | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 6112 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6113 | 	list_for_each_entry_rcu(pmu, &pmus, entry) { | 
| Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 6114 | 		ret = pmu->event_init(event); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6115 | 		if (!ret) | 
| Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 6116 | 			goto unlock; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 6117 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6118 | 		if (ret != -ENOENT) { | 
 | 6119 | 			pmu = ERR_PTR(ret); | 
| Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 6120 | 			goto unlock; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6121 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6122 | 	} | 
| Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 6123 | 	pmu = ERR_PTR(-ENOENT); | 
 | 6124 | unlock: | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6125 | 	srcu_read_unlock(&pmus_srcu, idx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6126 |  | 
 | 6127 | 	return pmu; | 
 | 6128 | } | 
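The lookup above relies on a convention of the ->event_init() callback: returning -ENOENT means "this event is not for me, try the next PMU", while any other error value ends the search. A minimal sketch of a conforming callback for the hypothetical example_pmu (EXAMPLE_MAX_CONFIG is equally made up, purely for illustration):

#define EXAMPLE_MAX_CONFIG	4	/* made-up bound, for the sketch only */

static int example_pmu_event_init(struct perf_event *event)
{
	/* Not addressed to this PMU: let perf_init_event() keep walking the list. */
	if (event->attr.type != example_pmu.type)
		return -ENOENT;

	/* Addressed to us but not countable: abort the search with a real error. */
	if (event->attr.config >= EXAMPLE_MAX_CONFIG)
		return -EINVAL;

	return 0;
}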
 | 6129 |  | 
 | 6130 | /* | 
 | 6131 |  * Allocate and initialize an event structure | 
 | 6132 |  */ | 
 | 6133 | static struct perf_event * | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6134 | perf_event_alloc(struct perf_event_attr *attr, int cpu, | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 6135 | 		 struct task_struct *task, | 
 | 6136 | 		 struct perf_event *group_leader, | 
 | 6137 | 		 struct perf_event *parent_event, | 
 | 6138 | 		 perf_overflow_handler_t overflow_handler) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6139 | { | 
| Peter Zijlstra | 51b0fe3 | 2010-06-11 13:35:57 +0200 | [diff] [blame] | 6140 | 	struct pmu *pmu; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6141 | 	struct perf_event *event; | 
 | 6142 | 	struct hw_perf_event *hwc; | 
 | 6143 | 	long err; | 
 | 6144 |  | 
| Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 6145 | 	if ((unsigned)cpu >= nr_cpu_ids) { | 
 | 6146 | 		if (!task || cpu != -1) | 
 | 6147 | 			return ERR_PTR(-EINVAL); | 
 | 6148 | 	} | 
 | 6149 |  | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6150 | 	event = kzalloc(sizeof(*event), GFP_KERNEL); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6151 | 	if (!event) | 
 | 6152 | 		return ERR_PTR(-ENOMEM); | 
 | 6153 |  | 
 | 6154 | 	/* | 
 | 6155 | 	 * Single events are their own group leaders, with an | 
 | 6156 | 	 * empty sibling list: | 
 | 6157 | 	 */ | 
 | 6158 | 	if (!group_leader) | 
 | 6159 | 		group_leader = event; | 
 | 6160 |  | 
 | 6161 | 	mutex_init(&event->child_mutex); | 
 | 6162 | 	INIT_LIST_HEAD(&event->child_list); | 
 | 6163 |  | 
 | 6164 | 	INIT_LIST_HEAD(&event->group_entry); | 
 | 6165 | 	INIT_LIST_HEAD(&event->event_entry); | 
 | 6166 | 	INIT_LIST_HEAD(&event->sibling_list); | 
 | 6167 | 	init_waitqueue_head(&event->waitq); | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 6168 | 	init_irq_work(&event->pending, perf_pending_event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6169 |  | 
 | 6170 | 	mutex_init(&event->mmap_mutex); | 
 | 6171 |  | 
 | 6172 | 	event->cpu		= cpu; | 
 | 6173 | 	event->attr		= *attr; | 
 | 6174 | 	event->group_leader	= group_leader; | 
 | 6175 | 	event->pmu		= NULL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6176 | 	event->oncpu		= -1; | 
 | 6177 |  | 
 | 6178 | 	event->parent		= parent_event; | 
 | 6179 |  | 
 | 6180 | 	event->ns		= get_pid_ns(current->nsproxy->pid_ns); | 
 | 6181 | 	event->id		= atomic64_inc_return(&perf_event_id); | 
 | 6182 |  | 
 | 6183 | 	event->state		= PERF_EVENT_STATE_INACTIVE; | 
 | 6184 |  | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 6185 | 	if (task) { | 
 | 6186 | 		event->attach_state = PERF_ATTACH_TASK; | 
 | 6187 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 
 | 6188 | 		/* | 
 | 6189 | 		 * hw_breakpoint is a bit difficult here.. | 
 | 6190 | 		 */ | 
 | 6191 | 		if (attr->type == PERF_TYPE_BREAKPOINT) | 
 | 6192 | 			event->hw.bp_target = task; | 
 | 6193 | #endif | 
 | 6194 | 	} | 
 | 6195 |  | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 6196 | 	if (!overflow_handler && parent_event) | 
 | 6197 | 		overflow_handler = parent_event->overflow_handler; | 
| Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 6198 |  | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 6199 | 	event->overflow_handler	= overflow_handler; | 
| Frederic Weisbecker | 97eaf53 | 2009-10-18 15:33:50 +0200 | [diff] [blame] | 6200 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6201 | 	if (attr->disabled) | 
 | 6202 | 		event->state = PERF_EVENT_STATE_OFF; | 
 | 6203 |  | 
 | 6204 | 	pmu = NULL; | 
 | 6205 |  | 
 | 6206 | 	hwc = &event->hw; | 
 | 6207 | 	hwc->sample_period = attr->sample_period; | 
 | 6208 | 	if (attr->freq && attr->sample_freq) | 
 | 6209 | 		hwc->sample_period = 1; | 
 | 6210 | 	hwc->last_period = hwc->sample_period; | 
 | 6211 |  | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 6212 | 	local64_set(&hwc->period_left, hwc->sample_period); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6213 |  | 
 | 6214 | 	/* | 
 | 6215 | 	 * we currently do not support PERF_FORMAT_GROUP on inherited events | 
 | 6216 | 	 */ | 
 | 6217 | 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) | 
 | 6218 | 		goto done; | 
 | 6219 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 6220 | 	pmu = perf_init_event(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6221 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6222 | done: | 
 | 6223 | 	err = 0; | 
 | 6224 | 	if (!pmu) | 
 | 6225 | 		err = -EINVAL; | 
 | 6226 | 	else if (IS_ERR(pmu)) | 
 | 6227 | 		err = PTR_ERR(pmu); | 
 | 6228 |  | 
 | 6229 | 	if (err) { | 
 | 6230 | 		if (event->ns) | 
 | 6231 | 			put_pid_ns(event->ns); | 
 | 6232 | 		kfree(event); | 
 | 6233 | 		return ERR_PTR(err); | 
 | 6234 | 	} | 
 | 6235 |  | 
 | 6236 | 	event->pmu = pmu; | 
 | 6237 |  | 
 | 6238 | 	if (!event->parent) { | 
| Peter Zijlstra | 82cd6de | 2010-10-14 17:57:23 +0200 | [diff] [blame] | 6239 | 		if (event->attach_state & PERF_ATTACH_TASK) | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 6240 | 			jump_label_inc(&perf_sched_events); | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 6241 | 		if (event->attr.mmap || event->attr.mmap_data) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6242 | 			atomic_inc(&nr_mmap_events); | 
 | 6243 | 		if (event->attr.comm) | 
 | 6244 | 			atomic_inc(&nr_comm_events); | 
 | 6245 | 		if (event->attr.task) | 
 | 6246 | 			atomic_inc(&nr_task_events); | 
| Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 6247 | 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { | 
 | 6248 | 			err = get_callchain_buffers(); | 
 | 6249 | 			if (err) { | 
 | 6250 | 				free_event(event); | 
 | 6251 | 				return ERR_PTR(err); | 
 | 6252 | 			} | 
 | 6253 | 		} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6254 | 	} | 
 | 6255 |  | 
 | 6256 | 	return event; | 
 | 6257 | } | 
 | 6258 |  | 
 | 6259 | static int perf_copy_attr(struct perf_event_attr __user *uattr, | 
 | 6260 | 			  struct perf_event_attr *attr) | 
 | 6261 | { | 
 | 6262 | 	u32 size; | 
 | 6263 | 	int ret; | 
 | 6264 |  | 
 | 6265 | 	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) | 
 | 6266 | 		return -EFAULT; | 
 | 6267 |  | 
 | 6268 | 	/* | 
 | 6269 | 	 * zero the full structure, so that a short copy leaves the rest zeroed. | 
 | 6270 | 	 */ | 
 | 6271 | 	memset(attr, 0, sizeof(*attr)); | 
 | 6272 |  | 
 | 6273 | 	ret = get_user(size, &uattr->size); | 
 | 6274 | 	if (ret) | 
 | 6275 | 		return ret; | 
 | 6276 |  | 
 | 6277 | 	if (size > PAGE_SIZE)	/* silly large */ | 
 | 6278 | 		goto err_size; | 
 | 6279 |  | 
 | 6280 | 	if (!size)		/* abi compat */ | 
 | 6281 | 		size = PERF_ATTR_SIZE_VER0; | 
 | 6282 |  | 
 | 6283 | 	if (size < PERF_ATTR_SIZE_VER0) | 
 | 6284 | 		goto err_size; | 
 | 6285 |  | 
 | 6286 | 	/* | 
 | 6287 | 	 * If we're handed a bigger struct than we know of, | 
 | 6288 | 	 * ensure all the unknown bits are 0 - i.e. new | 
 | 6289 | 	 * user-space does not rely on any kernel feature | 
 | 6290 | 	 * extensions we don't know about yet. | 
 | 6291 | 	 */ | 
 | 6292 | 	if (size > sizeof(*attr)) { | 
 | 6293 | 		unsigned char __user *addr; | 
 | 6294 | 		unsigned char __user *end; | 
 | 6295 | 		unsigned char val; | 
 | 6296 |  | 
 | 6297 | 		addr = (void __user *)uattr + sizeof(*attr); | 
 | 6298 | 		end  = (void __user *)uattr + size; | 
 | 6299 |  | 
 | 6300 | 		for (; addr < end; addr++) { | 
 | 6301 | 			ret = get_user(val, addr); | 
 | 6302 | 			if (ret) | 
 | 6303 | 				return ret; | 
 | 6304 | 			if (val) | 
 | 6305 | 				goto err_size; | 
 | 6306 | 		} | 
 | 6307 | 		size = sizeof(*attr); | 
 | 6308 | 	} | 
 | 6309 |  | 
 | 6310 | 	ret = copy_from_user(attr, uattr, size); | 
 | 6311 | 	if (ret) | 
 | 6312 | 		return -EFAULT; | 
 | 6313 |  | 
 | 6314 | 	/* | 
 | 6315 | 	 * If the type exists, the event initialization for that type | 
 | 6316 | 	 * will verify attr->config. | 
 | 6317 | 	 */ | 
 | 6318 | 	if (attr->type >= PERF_TYPE_MAX) | 
 | 6319 | 		return -EINVAL; | 
 | 6320 |  | 
| Mahesh Salgaonkar | cd75764 | 2010-01-30 10:25:18 +0530 | [diff] [blame] | 6321 | 	if (attr->__reserved_1) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6322 | 		return -EINVAL; | 
 | 6323 |  | 
 | 6324 | 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) | 
 | 6325 | 		return -EINVAL; | 
 | 6326 |  | 
 | 6327 | 	if (attr->read_format & ~(PERF_FORMAT_MAX-1)) | 
 | 6328 | 		return -EINVAL; | 
 | 6329 |  | 
 | 6330 | out: | 
 | 6331 | 	return ret; | 
 | 6332 |  | 
 | 6333 | err_size: | 
 | 6334 | 	put_user(sizeof(*attr), &uattr->size); | 
 | 6335 | 	ret = -E2BIG; | 
 | 6336 | 	goto out; | 
 | 6337 | } | 
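From the userspace side, the size handshake above means a caller should zero the whole structure and advertise the size it was compiled against: a newer kernel zero-extends a smaller attr, and an older kernel only rejects a larger one (-E2BIG) when the bytes it does not understand are non-zero. A sketch of the matching userspace setup (illustrative, not taken from the perf tool):

#include <string.h>
#include <linux/perf_event.h>

static void init_cycles_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));		/* future fields must read back as zero */
	attr->size = sizeof(*attr);		/* ABI revision this binary was built against */
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->disabled = 1;			/* start stopped, enable via ioctl() later */
	attr->exclude_kernel = 1;		/* count user space only */
}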
 | 6338 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6339 | static int | 
 | 6340 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6341 | { | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 6342 | 	struct perf_buffer *buffer = NULL, *old_buffer = NULL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6343 | 	int ret = -EINVAL; | 
 | 6344 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6345 | 	if (!output_event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6346 | 		goto set; | 
 | 6347 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6348 | 	/* don't allow circular references */ | 
 | 6349 | 	if (event == output_event) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6350 | 		goto out; | 
 | 6351 |  | 
| Peter Zijlstra | 0f13930 | 2010-05-20 14:35:15 +0200 | [diff] [blame] | 6352 | 	/* | 
 | 6353 | 	 * Don't allow cross-cpu buffers | 
 | 6354 | 	 */ | 
 | 6355 | 	if (output_event->cpu != event->cpu) | 
 | 6356 | 		goto out; | 
 | 6357 |  | 
 | 6358 | 	/* | 
 | 6359 | 	 * If it's not a per-cpu buffer, it must be the same task. | 
 | 6360 | 	 */ | 
 | 6361 | 	if (output_event->cpu == -1 && output_event->ctx != event->ctx) | 
 | 6362 | 		goto out; | 
 | 6363 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6364 | set: | 
 | 6365 | 	mutex_lock(&event->mmap_mutex); | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6366 | 	/* Can't redirect output if we've got an active mmap() */ | 
 | 6367 | 	if (atomic_read(&event->mmap_count)) | 
 | 6368 | 		goto unlock; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6369 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6370 | 	if (output_event) { | 
 | 6371 | 		/* get the buffer we want to redirect to */ | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 6372 | 		buffer = perf_buffer_get(output_event); | 
 | 6373 | 		if (!buffer) | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6374 | 			goto unlock; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6375 | 	} | 
 | 6376 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 6377 | 	old_buffer = event->buffer; | 
 | 6378 | 	rcu_assign_pointer(event->buffer, buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6379 | 	ret = 0; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6380 | unlock: | 
 | 6381 | 	mutex_unlock(&event->mmap_mutex); | 
 | 6382 |  | 
| Peter Zijlstra | ca5135e | 2010-05-28 19:33:23 +0200 | [diff] [blame] | 6383 | 	if (old_buffer) | 
 | 6384 | 		perf_buffer_put(old_buffer); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6385 | out: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6386 | 	return ret; | 
 | 6387 | } | 
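Besides the PERF_FLAG_FD_OUTPUT path in sys_perf_event_open() below, userspace can reach this function through the PERF_EVENT_IOC_SET_OUTPUT ioctl. A hedged sketch, where fd_from and fd_to are assumed to be perf event fds that already satisfy the same-CPU/same-task checks above:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Route fd_from's samples into fd_to's ring buffer, so only fd_to needs mmap(). */
static int redirect_output(int fd_from, int fd_to)
{
	return ioctl(fd_from, PERF_EVENT_IOC_SET_OUTPUT, fd_to);
}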
 | 6388 |  | 
 | 6389 | /** | 
 | 6390 |  * sys_perf_event_open - open a performance event, associate it to a task/cpu | 
 | 6391 |  * | 
 | 6392 |  * @attr_uptr:	event_id type attributes for monitoring/sampling | 
 | 6393 |  * @pid:		target pid | 
 | 6394 |  * @cpu:		target cpu | 
 | 6395 |  * @group_fd:		group leader event fd | 
 | 6396 |  */ | 
 | 6397 | SYSCALL_DEFINE5(perf_event_open, | 
 | 6398 | 		struct perf_event_attr __user *, attr_uptr, | 
 | 6399 | 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | 
 | 6400 | { | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6401 | 	struct perf_event *group_leader = NULL, *output_event = NULL; | 
 | 6402 | 	struct perf_event *event, *sibling; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6403 | 	struct perf_event_attr attr; | 
 | 6404 | 	struct perf_event_context *ctx; | 
 | 6405 | 	struct file *event_file = NULL; | 
 | 6406 | 	struct file *group_file = NULL; | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 6407 | 	struct task_struct *task = NULL; | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 6408 | 	struct pmu *pmu; | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6409 | 	int event_fd; | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6410 | 	int move_group = 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6411 | 	int fput_needed = 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6412 | 	int err; | 
 | 6413 |  | 
 | 6414 | 	/* for future expandability... */ | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 6415 | 	if (flags & ~PERF_FLAG_ALL) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6416 | 		return -EINVAL; | 
 | 6417 |  | 
 | 6418 | 	err = perf_copy_attr(attr_uptr, &attr); | 
 | 6419 | 	if (err) | 
 | 6420 | 		return err; | 
 | 6421 |  | 
 | 6422 | 	if (!attr.exclude_kernel) { | 
 | 6423 | 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | 
 | 6424 | 			return -EACCES; | 
 | 6425 | 	} | 
 | 6426 |  | 
 | 6427 | 	if (attr.freq) { | 
 | 6428 | 		if (attr.sample_freq > sysctl_perf_event_sample_rate) | 
 | 6429 | 			return -EINVAL; | 
 | 6430 | 	} | 
 | 6431 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 6432 | 	/* | 
 | 6433 | 	 * In cgroup mode, the pid argument is used to pass the fd | 
 | 6434 | 	 * opened to the cgroup directory in cgroupfs. The cpu argument | 
 | 6435 | 	 * designates the cpu on which to monitor threads from that | 
 | 6436 | 	 * cgroup. | 
 | 6437 | 	 */ | 
 | 6438 | 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) | 
 | 6439 | 		return -EINVAL; | 
 | 6440 |  | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6441 | 	event_fd = get_unused_fd_flags(O_RDWR); | 
 | 6442 | 	if (event_fd < 0) | 
 | 6443 | 		return event_fd; | 
 | 6444 |  | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6445 | 	if (group_fd != -1) { | 
 | 6446 | 		group_leader = perf_fget_light(group_fd, &fput_needed); | 
 | 6447 | 		if (IS_ERR(group_leader)) { | 
 | 6448 | 			err = PTR_ERR(group_leader); | 
| Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 6449 | 			goto err_fd; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6450 | 		} | 
 | 6451 | 		group_file = group_leader->filp; | 
 | 6452 | 		if (flags & PERF_FLAG_FD_OUTPUT) | 
 | 6453 | 			output_event = group_leader; | 
 | 6454 | 		if (flags & PERF_FLAG_FD_NO_GROUP) | 
 | 6455 | 			group_leader = NULL; | 
 | 6456 | 	} | 
 | 6457 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 6458 | 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { | 
| Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 6459 | 		task = find_lively_task_by_vpid(pid); | 
 | 6460 | 		if (IS_ERR(task)) { | 
 | 6461 | 			err = PTR_ERR(task); | 
 | 6462 | 			goto err_group_fd; | 
 | 6463 | 		} | 
 | 6464 | 	} | 
 | 6465 |  | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 6466 | 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); | 
| Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 6467 | 	if (IS_ERR(event)) { | 
 | 6468 | 		err = PTR_ERR(event); | 
| Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 6469 | 		goto err_task; | 
| Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 6470 | 	} | 
 | 6471 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 6472 | 	if (flags & PERF_FLAG_PID_CGROUP) { | 
 | 6473 | 		err = perf_cgroup_connect(pid, event, &attr, group_leader); | 
 | 6474 | 		if (err) | 
 | 6475 | 			goto err_alloc; | 
| Peter Zijlstra | 0830937 | 2011-03-03 11:31:20 +0100 | [diff] [blame] | 6476 | 		/* | 
 | 6477 | 		 * one more event: | 
 | 6478 | 		 * - that has cgroup constraint on event->cpu | 
 | 6479 | 		 * - that may need work on context switch | 
 | 6480 | 		 */ | 
 | 6481 | 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); | 
 | 6482 | 		jump_label_inc(&perf_sched_events); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 6483 | 	} | 
 | 6484 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6485 | 	/* | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 6486 | 	 * Special case software events and allow them to be part of | 
 | 6487 | 	 * any hardware group. | 
 | 6488 | 	 */ | 
 | 6489 | 	pmu = event->pmu; | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6490 |  | 
 | 6491 | 	if (group_leader && | 
 | 6492 | 	    (is_software_event(event) != is_software_event(group_leader))) { | 
 | 6493 | 		if (is_software_event(event)) { | 
 | 6494 | 			/* | 
 | 6495 | 			 * If event and group_leader are not both a software | 
 | 6496 | 			 * event, and event is, then group leader is not. | 
 | 6497 | 			 * | 
 | 6498 | 			 * Allow the addition of software events to !software | 
 | 6499 | 			 * groups, this is safe because software events never | 
 | 6500 | 			 * fail to schedule. | 
 | 6501 | 			 */ | 
 | 6502 | 			pmu = group_leader->pmu; | 
 | 6503 | 		} else if (is_software_event(group_leader) && | 
 | 6504 | 			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { | 
 | 6505 | 			/* | 
 | 6506 | 			 * In case the group is a pure software group, and we | 
 | 6507 | 			 * try to add a hardware event, move the whole group to | 
 | 6508 | 			 * the hardware context. | 
 | 6509 | 			 */ | 
 | 6510 | 			move_group = 1; | 
 | 6511 | 		} | 
 | 6512 | 	} | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 6513 |  | 
 | 6514 | 	/* | 
 | 6515 | 	 * Get the target context (task or percpu): | 
 | 6516 | 	 */ | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 6517 | 	ctx = find_get_context(pmu, task, cpu); | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 6518 | 	if (IS_ERR(ctx)) { | 
 | 6519 | 		err = PTR_ERR(ctx); | 
| Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 6520 | 		goto err_alloc; | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 6521 | 	} | 
 | 6522 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6523 | 	/* | 
 | 6524 | 	 * Look up the group leader (we will attach this event to it): | 
 | 6525 | 	 */ | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6526 | 	if (group_leader) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6527 | 		err = -EINVAL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6528 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6529 | 		/* | 
 | 6530 | 		 * Do not allow a recursive hierarchy (this new sibling | 
 | 6531 | 		 * becoming part of another group-sibling): | 
 | 6532 | 		 */ | 
 | 6533 | 		if (group_leader->group_leader != group_leader) | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6534 | 			goto err_context; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6535 | 		/* | 
 | 6536 | 		 * Do not allow to attach to a group in a different | 
 | 6537 | 		 * task or CPU context: | 
 | 6538 | 		 */ | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6539 | 		if (move_group) { | 
 | 6540 | 			if (group_leader->ctx->type != ctx->type) | 
 | 6541 | 				goto err_context; | 
 | 6542 | 		} else { | 
 | 6543 | 			if (group_leader->ctx != ctx) | 
 | 6544 | 				goto err_context; | 
 | 6545 | 		} | 
 | 6546 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6547 | 		/* | 
 | 6548 | 		 * Only a group leader can be exclusive or pinned | 
 | 6549 | 		 */ | 
 | 6550 | 		if (attr.exclusive || attr.pinned) | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6551 | 			goto err_context; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6552 | 	} | 
 | 6553 |  | 
 | 6554 | 	if (output_event) { | 
 | 6555 | 		err = perf_event_set_output(event, output_event); | 
 | 6556 | 		if (err) | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6557 | 			goto err_context; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6558 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6559 |  | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6560 | 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); | 
 | 6561 | 	if (IS_ERR(event_file)) { | 
 | 6562 | 		err = PTR_ERR(event_file); | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6563 | 		goto err_context; | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6564 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6565 |  | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6566 | 	if (move_group) { | 
 | 6567 | 		struct perf_event_context *gctx = group_leader->ctx; | 
 | 6568 |  | 
 | 6569 | 		mutex_lock(&gctx->mutex); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 6570 | 		perf_remove_from_context(group_leader); | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6571 | 		list_for_each_entry(sibling, &group_leader->sibling_list, | 
 | 6572 | 				    group_entry) { | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 6573 | 			perf_remove_from_context(sibling); | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6574 | 			put_ctx(gctx); | 
 | 6575 | 		} | 
 | 6576 | 		mutex_unlock(&gctx->mutex); | 
 | 6577 | 		put_ctx(gctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6578 | 	} | 
 | 6579 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6580 | 	event->filp = event_file; | 
 | 6581 | 	WARN_ON_ONCE(ctx->parent_ctx); | 
 | 6582 | 	mutex_lock(&ctx->mutex); | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 6583 |  | 
 | 6584 | 	if (move_group) { | 
 | 6585 | 		perf_install_in_context(ctx, group_leader, cpu); | 
 | 6586 | 		get_ctx(ctx); | 
 | 6587 | 		list_for_each_entry(sibling, &group_leader->sibling_list, | 
 | 6588 | 				    group_entry) { | 
 | 6589 | 			perf_install_in_context(ctx, sibling, cpu); | 
 | 6590 | 			get_ctx(ctx); | 
 | 6591 | 		} | 
 | 6592 | 	} | 
 | 6593 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6594 | 	perf_install_in_context(ctx, event, cpu); | 
 | 6595 | 	++ctx->generation; | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 6596 | 	perf_unpin_context(ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6597 | 	mutex_unlock(&ctx->mutex); | 
 | 6598 |  | 
 | 6599 | 	event->owner = current; | 
| Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 6600 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6601 | 	mutex_lock(¤t->perf_event_mutex); | 
 | 6602 | 	list_add_tail(&event->owner_entry, ¤t->perf_event_list); | 
 | 6603 | 	mutex_unlock(¤t->perf_event_mutex); | 
 | 6604 |  | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 6605 | 	/* | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 6606 | 	 * Precalculate sample_data sizes | 
 | 6607 | 	 */ | 
 | 6608 | 	perf_event__header_size(event); | 
| Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6609 | 	perf_event__id_header_size(event); | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 6610 |  | 
 | 6611 | 	/* | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 6612 | 	 * Drop the reference on the group_event after placing the | 
 | 6613 | 	 * new event on the sibling_list. This ensures destruction | 
 | 6614 | 	 * of the group leader will find the pointer to itself in | 
 | 6615 | 	 * perf_group_detach(). | 
 | 6616 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6617 | 	fput_light(group_file, fput_needed); | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6618 | 	fd_install(event_fd, event_file); | 
 | 6619 | 	return event_fd; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6620 |  | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6621 | err_context: | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 6622 | 	perf_unpin_context(ctx); | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6623 | 	put_ctx(ctx); | 
| Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 6624 | err_alloc: | 
 | 6625 | 	free_event(event); | 
| Peter Zijlstra | e7d0bc0 | 2010-10-14 16:54:51 +0200 | [diff] [blame] | 6626 | err_task: | 
 | 6627 | 	if (task) | 
 | 6628 | 		put_task_struct(task); | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 6629 | err_group_fd: | 
 | 6630 | 	fput_light(group_file, fput_needed); | 
| Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 6631 | err_fd: | 
 | 6632 | 	put_unused_fd(event_fd); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6633 | 	return err; | 
 | 6634 | } | 
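For reference, a minimal userspace caller of the syscall above; glibc provides no wrapper, so the usual route is a raw syscall(). This is an illustrative sketch, and the chosen event and ioctl sequence are just one plausible configuration:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* pid 0, cpu -1: this task on any CPU; no group leader, no flags. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd == -1) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the code being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);

	close(fd);
	return 0;
}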
 | 6635 |  | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6636 | /** | 
 | 6637 |  * perf_event_create_kernel_counter | 
 | 6638 |  * | 
 | 6639 |  * @attr: attributes of the counter to create | 
 | 6640 |  * @cpu: cpu in which the counter is bound | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 6641 |  * @task: task to profile (NULL for percpu) | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6642 |  */ | 
 | 6643 | struct perf_event * | 
 | 6644 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 6645 | 				 struct task_struct *task, | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 6646 | 				 perf_overflow_handler_t overflow_handler) | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6647 | { | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6648 | 	struct perf_event_context *ctx; | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6649 | 	struct perf_event *event; | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6650 | 	int err; | 
 | 6651 |  | 
 | 6652 | 	/* | 
 | 6653 | 	 * Get the target context (task or percpu): | 
 | 6654 | 	 */ | 
 | 6655 |  | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 6656 | 	event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); | 
| Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 6657 | 	if (IS_ERR(event)) { | 
 | 6658 | 		err = PTR_ERR(event); | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6659 | 		goto err; | 
 | 6660 | 	} | 
 | 6661 |  | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 6662 | 	ctx = find_get_context(event->pmu, task, cpu); | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6663 | 	if (IS_ERR(ctx)) { | 
 | 6664 | 		err = PTR_ERR(ctx); | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6665 | 		goto err_free; | 
| Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 6666 | 	} | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6667 |  | 
 | 6668 | 	event->filp = NULL; | 
 | 6669 | 	WARN_ON_ONCE(ctx->parent_ctx); | 
 | 6670 | 	mutex_lock(&ctx->mutex); | 
 | 6671 | 	perf_install_in_context(ctx, event, cpu); | 
 | 6672 | 	++ctx->generation; | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 6673 | 	perf_unpin_context(ctx); | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6674 | 	mutex_unlock(&ctx->mutex); | 
 | 6675 |  | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6676 | 	return event; | 
 | 6677 |  | 
| Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 6678 | err_free: | 
 | 6679 | 	free_event(event); | 
 | 6680 | err: | 
| Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 6681 | 	return ERR_PTR(err); | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 6682 | } | 
 | 6683 | EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); | 
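A hedged in-kernel usage sketch for the export above (hypothetical caller, not from this file): create a pinned per-CPU cycle counter with an overflow callback, roughly the shape of what the hard-lockup watchdog does. The handler signature is assumed to match perf_overflow_handler_t as it stands at this point in the tree; it has changed across kernel versions.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/perf_event.h>

static void example_overflow(struct perf_event *event, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* Called from PMU (often NMI) context each time sample_period expires. */
	pr_info("cpu%d: cycle counter overflowed\n", smp_processor_id());
}

static struct perf_event *example_start_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.pinned		= 1,
		.sample_period	= 100000000,	/* fire roughly every 10^8 cycles */
	};

	/* task == NULL: a per-CPU counter bound to @cpu; caller checks IS_ERR(). */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, example_overflow);
}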
 | 6684 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6685 | static void sync_child_event(struct perf_event *child_event, | 
 | 6686 | 			       struct task_struct *child) | 
 | 6687 | { | 
 | 6688 | 	struct perf_event *parent_event = child_event->parent; | 
 | 6689 | 	u64 child_val; | 
 | 6690 |  | 
 | 6691 | 	if (child_event->attr.inherit_stat) | 
 | 6692 | 		perf_event_read_event(child_event, child); | 
 | 6693 |  | 
| Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 6694 | 	child_val = perf_event_count(child_event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6695 |  | 
 | 6696 | 	/* | 
 | 6697 | 	 * Add back the child's count to the parent's count: | 
 | 6698 | 	 */ | 
| Peter Zijlstra | a6e6dea | 2010-05-21 14:27:58 +0200 | [diff] [blame] | 6699 | 	atomic64_add(child_val, &parent_event->child_count); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6700 | 	atomic64_add(child_event->total_time_enabled, | 
 | 6701 | 		     &parent_event->child_total_time_enabled); | 
 | 6702 | 	atomic64_add(child_event->total_time_running, | 
 | 6703 | 		     &parent_event->child_total_time_running); | 
 | 6704 |  | 
 | 6705 | 	/* | 
 | 6706 | 	 * Remove this event from the parent's list | 
 | 6707 | 	 */ | 
 | 6708 | 	WARN_ON_ONCE(parent_event->ctx->parent_ctx); | 
 | 6709 | 	mutex_lock(&parent_event->child_mutex); | 
 | 6710 | 	list_del_init(&child_event->child_list); | 
 | 6711 | 	mutex_unlock(&parent_event->child_mutex); | 
 | 6712 |  | 
 | 6713 | 	/* | 
 | 6714 | 	 * Release the parent event, if this was the last | 
 | 6715 | 	 * reference to it. | 
 | 6716 | 	 */ | 
 | 6717 | 	fput(parent_event->filp); | 
 | 6718 | } | 
 | 6719 |  | 
 | 6720 | static void | 
 | 6721 | __perf_event_exit_task(struct perf_event *child_event, | 
 | 6722 | 			 struct perf_event_context *child_ctx, | 
 | 6723 | 			 struct task_struct *child) | 
 | 6724 | { | 
| Peter Zijlstra | 38b435b | 2011-03-15 14:37:10 +0100 | [diff] [blame] | 6725 | 	if (child_event->parent) { | 
 | 6726 | 		raw_spin_lock_irq(&child_ctx->lock); | 
 | 6727 | 		perf_group_detach(child_event); | 
 | 6728 | 		raw_spin_unlock_irq(&child_ctx->lock); | 
 | 6729 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6730 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 6731 | 	perf_remove_from_context(child_event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6732 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6733 | 	/* | 
| Peter Zijlstra | 38b435b | 2011-03-15 14:37:10 +0100 | [diff] [blame] | 6734 | 	 * It can happen that the parent exits first, and has events | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6735 | 	 * that are still around due to the child reference. These | 
| Peter Zijlstra | 38b435b | 2011-03-15 14:37:10 +0100 | [diff] [blame] | 6736 | 	 * events need to be zapped. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6737 | 	 */ | 
| Peter Zijlstra | 38b435b | 2011-03-15 14:37:10 +0100 | [diff] [blame] | 6738 | 	if (child_event->parent) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6739 | 		sync_child_event(child_event, child); | 
 | 6740 | 		free_event(child_event); | 
 | 6741 | 	} | 
 | 6742 | } | 
 | 6743 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6744 | static void perf_event_exit_task_context(struct task_struct *child, int ctxn) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6745 | { | 
 | 6746 | 	struct perf_event *child_event, *tmp; | 
 | 6747 | 	struct perf_event_context *child_ctx; | 
 | 6748 | 	unsigned long flags; | 
 | 6749 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6750 | 	if (likely(!child->perf_event_ctxp[ctxn])) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6751 | 		perf_event_task(child, NULL, 0); | 
 | 6752 | 		return; | 
 | 6753 | 	} | 
 | 6754 |  | 
 | 6755 | 	local_irq_save(flags); | 
 | 6756 | 	/* | 
 | 6757 | 	 * We can't reschedule here because interrupts are disabled, | 
 | 6758 | 	 * and either child is current or it is a task that can't be | 
 | 6759 | 	 * scheduled, so we are now safe from rescheduling changing | 
 | 6760 | 	 * our context. | 
 | 6761 | 	 */ | 
| Oleg Nesterov | 806839b | 2011-01-21 18:45:47 +0100 | [diff] [blame] | 6762 | 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); | 
| Peter Zijlstra | 82cd6de | 2010-10-14 17:57:23 +0200 | [diff] [blame] | 6763 | 	task_ctx_sched_out(child_ctx, EVENT_ALL); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6764 |  | 
 | 6765 | 	/* | 
 | 6766 | 	 * Take the context lock here so that if find_get_context is | 
 | 6767 | 	 * reading child->perf_event_ctxp, we wait until it has | 
 | 6768 | 	 * incremented the context's refcount before we do put_ctx below. | 
 | 6769 | 	 */ | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 6770 | 	raw_spin_lock(&child_ctx->lock); | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6771 | 	child->perf_event_ctxp[ctxn] = NULL; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6772 | 	/* | 
 | 6773 | 	 * If this context is a clone; unclone it so it can't get | 
 | 6774 | 	 * swapped to another process while we're removing all | 
 | 6775 | 	 * the events from it. | 
 | 6776 | 	 */ | 
 | 6777 | 	unclone_ctx(child_ctx); | 
| Peter Zijlstra | 5e942bb | 2009-11-23 11:37:26 +0100 | [diff] [blame] | 6778 | 	update_context_time(child_ctx); | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 6779 | 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6780 |  | 
 | 6781 | 	/* | 
 | 6782 | 	 * Report the task dead after unscheduling the events so that we | 
 | 6783 | 	 * won't get any samples after PERF_RECORD_EXIT. We can however still | 
 | 6784 | 	 * get a few PERF_RECORD_READ events. | 
 | 6785 | 	 */ | 
 | 6786 | 	perf_event_task(child, child_ctx, 0); | 
 | 6787 |  | 
 | 6788 | 	/* | 
 | 6789 | 	 * We can recurse on the same lock type through: | 
 | 6790 | 	 * | 
 | 6791 | 	 *   __perf_event_exit_task() | 
 | 6792 | 	 *     sync_child_event() | 
 | 6793 | 	 *       fput(parent_event->filp) | 
 | 6794 | 	 *         perf_release() | 
 | 6795 | 	 *           mutex_lock(&ctx->mutex) | 
 | 6796 | 	 * | 
 | 6797 | 	 * But since it's the parent context it won't be the same instance. | 
 | 6798 | 	 */ | 
| Peter Zijlstra | a0507c8 | 2010-05-06 15:42:53 +0200 | [diff] [blame] | 6799 | 	mutex_lock(&child_ctx->mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6800 |  | 
 | 6801 | again: | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 6802 | 	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, | 
 | 6803 | 				 group_entry) | 
 | 6804 | 		__perf_event_exit_task(child_event, child_ctx, child); | 
 | 6805 |  | 
 | 6806 | 	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6807 | 				 group_entry) | 
 | 6808 | 		__perf_event_exit_task(child_event, child_ctx, child); | 
 | 6809 |  | 
 | 6810 | 	/* | 
 | 6811 | 	 * If the last event was a group event, it will have appended all | 
 | 6812 | 	 * its siblings to the list, but we obtained 'tmp' before that, so | 
 | 6813 | 	 * it still points to the list head terminating the iteration. | 
 | 6814 | 	 */ | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 6815 | 	if (!list_empty(&child_ctx->pinned_groups) || | 
 | 6816 | 	    !list_empty(&child_ctx->flexible_groups)) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6817 | 		goto again; | 
 | 6818 |  | 
 | 6819 | 	mutex_unlock(&child_ctx->mutex); | 
 | 6820 |  | 
 | 6821 | 	put_ctx(child_ctx); | 
 | 6822 | } | 
 | 6823 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6824 | /* | 
 | 6825 |  * When a child task exits, feed back event values to parent events. | 
 | 6826 |  */ | 
 | 6827 | void perf_event_exit_task(struct task_struct *child) | 
 | 6828 | { | 
| Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 6829 | 	struct perf_event *event, *tmp; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6830 | 	int ctxn; | 
 | 6831 |  | 
| Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 6832 | 	mutex_lock(&child->perf_event_mutex); | 
 | 6833 | 	list_for_each_entry_safe(event, tmp, &child->perf_event_list, | 
 | 6834 | 				 owner_entry) { | 
 | 6835 | 		list_del_init(&event->owner_entry); | 
 | 6836 |  | 
 | 6837 | 		/* | 
 | 6838 | 		 * Ensure the list deletion is visible before we clear | 
 | 6839 | 		 * the owner, closes a race against perf_release() where | 
 | 6840 | 		 * we need to serialize on the owner->perf_event_mutex. | 
 | 6841 | 		 */ | 
 | 6842 | 		smp_wmb(); | 
 | 6843 | 		event->owner = NULL; | 
 | 6844 | 	} | 
 | 6845 | 	mutex_unlock(&child->perf_event_mutex); | 
 | 6846 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6847 | 	for_each_task_context_nr(ctxn) | 
 | 6848 | 		perf_event_exit_task_context(child, ctxn); | 
 | 6849 | } | 
 | 6850 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 6851 | static void perf_free_event(struct perf_event *event, | 
 | 6852 | 			    struct perf_event_context *ctx) | 
 | 6853 | { | 
 | 6854 | 	struct perf_event *parent = event->parent; | 
 | 6855 |  | 
 | 6856 | 	if (WARN_ON_ONCE(!parent)) | 
 | 6857 | 		return; | 
 | 6858 |  | 
 | 6859 | 	mutex_lock(&parent->child_mutex); | 
 | 6860 | 	list_del_init(&event->child_list); | 
 | 6861 | 	mutex_unlock(&parent->child_mutex); | 
 | 6862 |  | 
 | 6863 | 	fput(parent->filp); | 
 | 6864 |  | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 6865 | 	perf_group_detach(event); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 6866 | 	list_del_event(event, ctx); | 
 | 6867 | 	free_event(event); | 
 | 6868 | } | 
 | 6869 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6870 | /* | 
 | 6871 |  * free an unexposed, unused context as created by inheritance by | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6872 |  * perf_event_init_task below, used by fork() in case of failure. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6873 |  */ | 
 | 6874 | void perf_event_free_task(struct task_struct *task) | 
 | 6875 | { | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6876 | 	struct perf_event_context *ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6877 | 	struct perf_event *event, *tmp; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6878 | 	int ctxn; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6879 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6880 | 	for_each_task_context_nr(ctxn) { | 
 | 6881 | 		ctx = task->perf_event_ctxp[ctxn]; | 
 | 6882 | 		if (!ctx) | 
 | 6883 | 			continue; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6884 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6885 | 		mutex_lock(&ctx->mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6886 | again: | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6887 | 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, | 
 | 6888 | 				group_entry) | 
 | 6889 | 			perf_free_event(event, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6890 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6891 | 		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, | 
 | 6892 | 				group_entry) | 
 | 6893 | 			perf_free_event(event, ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6894 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6895 | 		if (!list_empty(&ctx->pinned_groups) || | 
 | 6896 | 				!list_empty(&ctx->flexible_groups)) | 
 | 6897 | 			goto again; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6898 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6899 | 		mutex_unlock(&ctx->mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6900 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 6901 | 		put_ctx(ctx); | 
 | 6902 | 	} | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6903 | } | 
 | 6904 |  | 
| Peter Zijlstra | 4e231c7 | 2010-09-09 21:01:59 +0200 | [diff] [blame] | 6905 | void perf_event_delayed_put(struct task_struct *task) | 
 | 6906 | { | 
 | 6907 | 	int ctxn; | 
 | 6908 |  | 
 | 6909 | 	for_each_task_context_nr(ctxn) | 
 | 6910 | 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); | 
 | 6911 | } | 
 | 6912 |  | 
| Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 6913 | /* | 
 | 6914 |  * inherit an event from parent task to child task: | 
 | 6915 |  */ | 
 | 6916 | static struct perf_event * | 
 | 6917 | inherit_event(struct perf_event *parent_event, | 
 | 6918 | 	      struct task_struct *parent, | 
 | 6919 | 	      struct perf_event_context *parent_ctx, | 
 | 6920 | 	      struct task_struct *child, | 
 | 6921 | 	      struct perf_event *group_leader, | 
 | 6922 | 	      struct perf_event_context *child_ctx) | 
 | 6923 | { | 
 | 6924 | 	struct perf_event *child_event; | 
| Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 6925 | 	unsigned long flags; | 
| Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 6926 |  | 
 | 6927 | 	/* | 
 | 6928 | 	 * Instead of creating recursive hierarchies of events, | 
 | 6929 | 	 * we link inherited events back to the original parent, | 
 | 6930 | 	 * which has a filp for sure, which we use as the reference | 
 | 6931 | 	 * which is guaranteed to have a filp that we use as the reference | 
 | 6932 | 	 */ | 
 | 6933 | 	if (parent_event->parent) | 
 | 6934 | 		parent_event = parent_event->parent; | 
 | 6935 |  | 
 | 6936 | 	child_event = perf_event_alloc(&parent_event->attr, | 
 | 6937 | 					   parent_event->cpu, | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 6938 | 					   child, | 
| Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 6939 | 					   group_leader, parent_event, | 
 | 6940 | 					   NULL); | 
 | 6941 | 	if (IS_ERR(child_event)) | 
 | 6942 | 		return child_event; | 
 | 6943 | 	get_ctx(child_ctx); | 
 | 6944 |  | 
 | 6945 | 	/* | 
 | 6946 | 	 * Make the child state follow the state of the parent event, | 
 | 6947 | 	 * not its attr.disabled bit.  We hold the parent's mutex, | 
 | 6948 | 	 * so we won't race with perf_event_{en, dis}able_family. | 
 | 6949 | 	 */ | 
 | 6950 | 	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) | 
 | 6951 | 		child_event->state = PERF_EVENT_STATE_INACTIVE; | 
 | 6952 | 	else | 
 | 6953 | 		child_event->state = PERF_EVENT_STATE_OFF; | 
 | 6954 |  | 
 | 6955 | 	if (parent_event->attr.freq) { | 
 | 6956 | 		u64 sample_period = parent_event->hw.sample_period; | 
 | 6957 | 		struct hw_perf_event *hwc = &child_event->hw; | 
 | 6958 |  | 
 | 6959 | 		hwc->sample_period = sample_period; | 
 | 6960 | 		hwc->last_period   = sample_period; | 
 | 6961 |  | 
 | 6962 | 		local64_set(&hwc->period_left, sample_period); | 
 | 6963 | 	} | 
 | 6964 |  | 
 | 6965 | 	child_event->ctx = child_ctx; | 
 | 6966 | 	child_event->overflow_handler = parent_event->overflow_handler; | 
 | 6967 |  | 
 | 6968 | 	/* | 
| Thomas Gleixner | 614b678 | 2010-12-03 16:24:32 -0200 | [diff] [blame] | 6969 | 	 * Precalculate sample_data sizes | 
 | 6970 | 	 */ | 
 | 6971 | 	perf_event__header_size(child_event); | 
| Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6972 | 	perf_event__id_header_size(child_event); | 
| Thomas Gleixner | 614b678 | 2010-12-03 16:24:32 -0200 | [diff] [blame] | 6973 |  | 
 | 6974 | 	/* | 
| Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 6975 | 	 * Link it up in the child's context: | 
 | 6976 | 	 */ | 
| Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 6977 | 	raw_spin_lock_irqsave(&child_ctx->lock, flags); | 
| Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 6978 | 	add_event_to_ctx(child_event, child_ctx); | 
| Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 6979 | 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags); | 
| Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 6980 |  | 
 | 6981 | 	/* | 
 | 6982 | 	 * Get a reference to the parent filp - we will fput it | 
 | 6983 | 	 * when the child event exits. This is safe to do because | 
 | 6984 | 	 * we are in the parent and we know that the filp still | 
 | 6985 | 	 * exists and has a nonzero count: | 
 | 6986 | 	 */ | 
 | 6987 | 	atomic_long_inc(&parent_event->filp->f_count); | 
 | 6988 |  | 
 | 6989 | 	/* | 
 | 6990 | 	 * Link this into the parent event's child list | 
 | 6991 | 	 */ | 
 | 6992 | 	WARN_ON_ONCE(parent_event->ctx->parent_ctx); | 
 | 6993 | 	mutex_lock(&parent_event->child_mutex); | 
 | 6994 | 	list_add_tail(&child_event->child_list, &parent_event->child_list); | 
 | 6995 | 	mutex_unlock(&parent_event->child_mutex); | 
 | 6996 |  | 
 | 6997 | 	return child_event; | 
 | 6998 | } | 
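The inheritance machinery here is driven by a single userspace flag: with attr.inherit set, each fork() of the monitored task clones the event into the child (perf_event_init_task() below), and the child's counts are folded back into the parent fd by sync_child_event() above. A sketch of the userspace side, keeping in mind the PERF_FORMAT_GROUP restriction enforced in perf_event_alloc():

#include <string.h>
#include <linux/perf_event.h>

static void init_tree_counter(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->inherit = 1;			/* follow the task across fork() */
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;
	/* PERF_FORMAT_GROUP would be rejected for inherited events. */
}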
 | 6999 |  | 
 | 7000 | static int inherit_group(struct perf_event *parent_event, | 
 | 7001 | 	      struct task_struct *parent, | 
 | 7002 | 	      struct perf_event_context *parent_ctx, | 
 | 7003 | 	      struct task_struct *child, | 
 | 7004 | 	      struct perf_event_context *child_ctx) | 
 | 7005 | { | 
 | 7006 | 	struct perf_event *leader; | 
 | 7007 | 	struct perf_event *sub; | 
 | 7008 | 	struct perf_event *child_ctr; | 
 | 7009 |  | 
 | 7010 | 	leader = inherit_event(parent_event, parent, parent_ctx, | 
 | 7011 | 				 child, NULL, child_ctx); | 
 | 7012 | 	if (IS_ERR(leader)) | 
 | 7013 | 		return PTR_ERR(leader); | 
 | 7014 | 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | 
 | 7015 | 		child_ctr = inherit_event(sub, parent, parent_ctx, | 
 | 7016 | 					    child, leader, child_ctx); | 
 | 7017 | 		if (IS_ERR(child_ctr)) | 
 | 7018 | 			return PTR_ERR(child_ctr); | 
 | 7019 | 	} | 
 | 7020 | 	return 0; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7021 | } | 
 | 7022 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7023 | static int | 
 | 7024 | inherit_task_group(struct perf_event *event, struct task_struct *parent, | 
 | 7025 | 		   struct perf_event_context *parent_ctx, | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7026 | 		   struct task_struct *child, int ctxn, | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7027 | 		   int *inherited_all) | 
 | 7028 | { | 
 | 7029 | 	int ret; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7030 | 	struct perf_event_context *child_ctx; | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7031 |  | 
 | 7032 | 	if (!event->attr.inherit) { | 
 | 7033 | 		*inherited_all = 0; | 
 | 7034 | 		return 0; | 
 | 7035 | 	} | 
 | 7036 |  | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 7037 | 	child_ctx = child->perf_event_ctxp[ctxn]; | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7038 | 	if (!child_ctx) { | 
 | 7039 | 		/* | 
 | 7040 | 		 * This is executed from the parent task context, so | 
 | 7041 | 		 * inherit events that have been marked for cloning. | 
 | 7042 | 		 * First allocate and initialize a context for the | 
 | 7043 | 		 * child. | 
 | 7044 | 		 */ | 
 | 7045 |  | 
| Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 7046 | 		child_ctx = alloc_perf_context(event->pmu, child); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7047 | 		if (!child_ctx) | 
 | 7048 | 			return -ENOMEM; | 
 | 7049 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7050 | 		child->perf_event_ctxp[ctxn] = child_ctx; | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7051 | 	} | 
 | 7052 |  | 
 | 7053 | 	ret = inherit_group(event, parent, parent_ctx, | 
 | 7054 | 			    child, child_ctx); | 
 | 7055 |  | 
 | 7056 | 	if (ret) | 
 | 7057 | 		*inherited_all = 0; | 
 | 7058 |  | 
 | 7059 | 	return ret; | 
 | 7060 | } | 
 | 7061 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7062 | /* | 
 | 7063 |  * Initialize the perf_event context in task_struct | 
 | 7064 |  */ | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7065 | int perf_event_init_context(struct task_struct *child, int ctxn) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7066 | { | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7067 | 	struct perf_event_context *child_ctx, *parent_ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7068 | 	struct perf_event_context *cloned_ctx; | 
 | 7069 | 	struct perf_event *event; | 
 | 7070 | 	struct task_struct *parent = current; | 
 | 7071 | 	int inherited_all = 1; | 
| Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 7072 | 	unsigned long flags; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7073 | 	int ret = 0; | 
 | 7074 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7075 | 	if (likely(!parent->perf_event_ctxp[ctxn])) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7076 | 		return 0; | 
 | 7077 |  | 
 | 7078 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7079 | 	 * If the parent's context is a clone, pin it so it won't get | 
 | 7080 | 	 * swapped under us. | 
 | 7081 | 	 */ | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7082 | 	parent_ctx = perf_pin_task_context(parent, ctxn); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7083 |  | 
 | 7084 | 	/* | 
 | 7085 | 	 * No need to check if parent_ctx != NULL here; since we saw | 
 | 7086 | 	 * it non-NULL earlier, the only reason for it to become NULL | 
 | 7087 | 	 * is if we exit, and since we're currently in the middle of | 
 | 7088 | 	 * a fork we can't be exiting at the same time. | 
 | 7089 | 	 */ | 
 | 7090 |  | 
 | 7091 | 	/* | 
 | 7092 | 	 * Lock the parent list. No need to lock the child - not PID | 
 | 7093 | 	 * hashed yet and not running, so nobody can access it. | 
 | 7094 | 	 */ | 
 | 7095 | 	mutex_lock(&parent_ctx->mutex); | 
 | 7096 |  | 
 | 7097 | 	/* | 
 | 7098 | 	 * We don't have to disable NMIs - we are only looking at | 
 | 7099 | 	 * the list, not manipulating it: | 
 | 7100 | 	 */ | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7101 | 	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7102 | 		ret = inherit_task_group(event, parent, parent_ctx, | 
 | 7103 | 					 child, ctxn, &inherited_all); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7104 | 		if (ret) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7105 | 			break; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7106 | 	} | 
 | 7107 |  | 
| Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 7108 | 	/* | 
 | 7109 | 	 * We can't hold ctx->lock when iterating the ->flexible_groups list due | 
 | 7110 | 	 * to allocations, but we need to prevent rotation because | 
 | 7111 | 	 * rotate_ctx() will change the list from interrupt context. | 
 | 7112 | 	 */ | 
 | 7113 | 	raw_spin_lock_irqsave(&parent_ctx->lock, flags); | 
 | 7114 | 	parent_ctx->rotate_disable = 1; | 
 | 7115 | 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | 
 | 7116 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7117 | 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7118 | 		ret = inherit_task_group(event, parent, parent_ctx, | 
 | 7119 | 					 child, ctxn, &inherited_all); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7120 | 		if (ret) | 
 | 7121 | 			break; | 
 | 7122 | 	} | 
 | 7123 |  | 
| Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 7124 | 	raw_spin_lock_irqsave(&parent_ctx->lock, flags); | 
 | 7125 | 	parent_ctx->rotate_disable = 0; | 
| Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 7126 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7127 | 	child_ctx = child->perf_event_ctxp[ctxn]; | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7128 |  | 
| Peter Zijlstra | 05cbaa2 | 2009-12-30 16:00:35 +0100 | [diff] [blame] | 7129 | 	if (child_ctx && inherited_all) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7130 | 		/* | 
 | 7131 | 		 * Mark the child context as a clone of the parent | 
 | 7132 | 		 * context, or of whatever the parent is a clone of. | 
| Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 7133 | 		 * | 
 | 7134 | 	 * Note that if the parent is a clone, holding | 
 | 7135 | 	 * parent_ctx->lock keeps it from being uncloned. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7136 | 		 */ | 
| Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 7137 | 		cloned_ctx = parent_ctx->parent_ctx; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7138 | 		if (cloned_ctx) { | 
 | 7139 | 			child_ctx->parent_ctx = cloned_ctx; | 
 | 7140 | 			child_ctx->parent_gen = parent_ctx->parent_gen; | 
 | 7141 | 		} else { | 
 | 7142 | 			child_ctx->parent_ctx = parent_ctx; | 
 | 7143 | 			child_ctx->parent_gen = parent_ctx->generation; | 
 | 7144 | 		} | 
 | 7145 | 		get_ctx(child_ctx->parent_ctx); | 
 | 7146 | 	} | 
 | 7147 |  | 
| Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 7148 | 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7149 | 	mutex_unlock(&parent_ctx->mutex); | 
 | 7150 |  | 
 | 7151 | 	perf_unpin_context(parent_ctx); | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 7152 | 	put_ctx(parent_ctx); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7153 |  | 
 | 7154 | 	return ret; | 
 | 7155 | } | 
 | 7156 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7157 | /* | 
 | 7158 |  * Initialize the perf_event context in task_struct | 
 | 7159 |  */ | 
 | 7160 | int perf_event_init_task(struct task_struct *child) | 
 | 7161 | { | 
 | 7162 | 	int ctxn, ret; | 
 | 7163 |  | 
| Oleg Nesterov | 8550d7c | 2011-01-19 19:22:28 +0100 | [diff] [blame] | 7164 | 	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); | 
 | 7165 | 	mutex_init(&child->perf_event_mutex); | 
 | 7166 | 	INIT_LIST_HEAD(&child->perf_event_list); | 
 | 7167 |  | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7168 | 	for_each_task_context_nr(ctxn) { | 
 | 7169 | 		ret = perf_event_init_context(child, ctxn); | 
 | 7170 | 		if (ret) | 
 | 7171 | 			return ret; | 
 | 7172 | 	} | 
 | 7173 |  | 
 | 7174 | 	return 0; | 
 | 7175 | } | 
 | 7176 |  | 
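 |  | /* Boot-time setup: initialize each possible CPU's swevent hash mutex and rotation list. */ | 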
| Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 7177 | static void __init perf_event_init_all_cpus(void) | 
 | 7178 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7179 | 	struct swevent_htable *swhash; | 
| Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 7180 | 	int cpu; | 
| Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 7181 |  | 
 | 7182 | 	for_each_possible_cpu(cpu) { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7183 | 		swhash = &per_cpu(swevent_htable, cpu); | 
 | 7184 | 		mutex_init(&swhash->hlist_mutex); | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 7185 | 		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); | 
| Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 7186 | 	} | 
 | 7187 | } | 
 | 7188 |  | 
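 |  | /* Hotplug bring-up: allocate the CPU's swevent hashlist if it already has users. */ | 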
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7189 | static void __cpuinit perf_event_init_cpu(int cpu) | 
 | 7190 | { | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7191 | 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7192 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7193 | 	mutex_lock(&swhash->hlist_mutex); | 
 | 7194 | 	if (swhash->hlist_refcount > 0) { | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 7195 | 		struct swevent_hlist *hlist; | 
 | 7196 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7197 | 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); | 
 | 7198 | 		WARN_ON(!hlist); | 
 | 7199 | 		rcu_assign_pointer(swhash->swevent_hlist, hlist); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 7200 | 	} | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7201 | 	mutex_unlock(&swhash->hlist_mutex); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7202 | } | 
 | 7203 |  | 
| Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 7204 | #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC | 
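 |  | /* Take this CPU's context off the rotation list; must run with interrupts disabled. */ | 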
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 7205 | static void perf_pmu_rotate_stop(struct pmu *pmu) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7206 | { | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 7207 | 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); | 
 | 7208 |  | 
 | 7209 | 	WARN_ON(!irqs_disabled()); | 
 | 7210 |  | 
 | 7211 | 	list_del_init(&cpuctx->rotation_list); | 
 | 7212 | } | 
 | 7213 |  | 
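 |  | /* Runs on the target CPU via smp_call_function_single(): stop rotation and remove every event from the context. */ | 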
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7214 | static void __perf_event_exit_context(void *__info) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7215 | { | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7216 | 	struct perf_event_context *ctx = __info; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7217 | 	struct perf_event *event, *tmp; | 
 | 7218 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7219 | 	perf_pmu_rotate_stop(ctx->pmu); | 
| Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 7220 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7221 | 	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 7222 | 		__perf_remove_from_context(event); | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 7223 | 	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) | 
| Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 7224 | 		__perf_remove_from_context(event); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7225 | } | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7226 |  | 
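 |  | /* For each registered PMU, tear down its per-cpu context on the given CPU. */ | 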
 | 7227 | static void perf_event_exit_cpu_context(int cpu) | 
 | 7228 | { | 
 | 7229 | 	struct perf_event_context *ctx; | 
 | 7230 | 	struct pmu *pmu; | 
 | 7231 | 	int idx; | 
 | 7232 |  | 
 | 7233 | 	idx = srcu_read_lock(&pmus_srcu); | 
 | 7234 | 	list_for_each_entry_rcu(pmu, &pmus, entry) { | 
| Peter Zijlstra | 917bdd1 | 2010-09-17 11:28:49 +0200 | [diff] [blame] | 7235 | 		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7236 |  | 
 | 7237 | 		mutex_lock(&ctx->mutex); | 
 | 7238 | 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); | 
 | 7239 | 		mutex_unlock(&ctx->mutex); | 
 | 7240 | 	} | 
 | 7241 | 	srcu_read_unlock(&pmus_srcu, idx); | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7242 | } | 
 | 7243 |  | 
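 |  | /* Hotplug teardown: release the CPU's swevent hashlist and exit all per-PMU contexts. */ | 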
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7244 | static void perf_event_exit_cpu(int cpu) | 
 | 7245 | { | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7246 | 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7247 |  | 
| Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 7248 | 	mutex_lock(&swhash->hlist_mutex); | 
 | 7249 | 	swevent_hlist_release(swhash); | 
 | 7250 | 	mutex_unlock(&swhash->hlist_mutex); | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 7251 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 7252 | 	perf_event_exit_cpu_context(cpu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7253 | } | 
 | 7254 | #else | 
 | 7255 | static inline void perf_event_exit_cpu(int cpu) { } | 
 | 7256 | #endif | 
 | 7257 |  | 
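 |  | /* Reboot notifier: quiesce perf by taking every online CPU through the offline path. */ | 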
| Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 7258 | static int | 
 | 7259 | perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) | 
 | 7260 | { | 
 | 7261 | 	int cpu; | 
 | 7262 |  | 
 | 7263 | 	for_each_online_cpu(cpu) | 
 | 7264 | 		perf_event_exit_cpu(cpu); | 
 | 7265 |  | 
 | 7266 | 	return NOTIFY_OK; | 
 | 7267 | } | 
 | 7268 |  | 
 | 7269 | /* | 
 | 7270 |  * Run the perf reboot notifier at the very last possible moment so that | 
 | 7271 |  * the generic watchdog code runs as long as possible. | 
 | 7272 |  */ | 
 | 7273 | static struct notifier_block perf_reboot_notifier = { | 
 | 7274 | 	.notifier_call = perf_reboot, | 
 | 7275 | 	.priority = INT_MIN, | 
 | 7276 | }; | 
 | 7277 |  | 
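 |  | /* CPU hotplug callback: set perf state up as a CPU comes online, tear it down before it goes away. */ | 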
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7278 | static int __cpuinit | 
 | 7279 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | 
 | 7280 | { | 
 | 7281 | 	unsigned int cpu = (long)hcpu; | 
 | 7282 |  | 
| Peter Zijlstra | 5e11637 | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 7283 | 	switch (action & ~CPU_TASKS_FROZEN) { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7284 |  | 
 | 7285 | 	case CPU_UP_PREPARE: | 
| Peter Zijlstra | 5e11637 | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 7286 | 	case CPU_DOWN_FAILED: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7287 | 		perf_event_init_cpu(cpu); | 
 | 7288 | 		break; | 
 | 7289 |  | 
| Peter Zijlstra | 5e11637 | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 7290 | 	case CPU_UP_CANCELED: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7291 | 	case CPU_DOWN_PREPARE: | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7292 | 		perf_event_exit_cpu(cpu); | 
 | 7293 | 		break; | 
 | 7294 |  | 
 | 7295 | 	default: | 
 | 7296 | 		break; | 
 | 7297 | 	} | 
 | 7298 |  | 
 | 7299 | 	return NOTIFY_OK; | 
 | 7300 | } | 
 | 7301 |  | 
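 |  | /* | 
 |  |  * Boot-time initialization: set up the PMU idr, per-cpu state and SRCU, | 
 |  |  * register the built-in software, cpu-clock, task-clock and tracepoint | 
 |  |  * PMUs, and hook up the CPU-hotplug, reboot and hw-breakpoint machinery. | 
 |  |  */ | 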
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7302 | void __init perf_event_init(void) | 
 | 7303 | { | 
| Jason Wessel | 3c502e7 | 2010-11-04 17:33:01 -0500 | [diff] [blame] | 7304 | 	int ret; | 
 | 7305 |  | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 7306 | 	idr_init(&pmu_idr); | 
 | 7307 |  | 
| Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 7308 | 	perf_event_init_all_cpus(); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 7309 | 	init_srcu_struct(&pmus_srcu); | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 7310 | 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); | 
 | 7311 | 	perf_pmu_register(&perf_cpu_clock, NULL, -1); | 
 | 7312 | 	perf_pmu_register(&perf_task_clock, NULL, -1); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 7313 | 	perf_tp_register(); | 
 | 7314 | 	perf_cpu_notifier(perf_cpu_notify); | 
| Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 7315 | 	register_reboot_notifier(&perf_reboot_notifier); | 
| Jason Wessel | 3c502e7 | 2010-11-04 17:33:01 -0500 | [diff] [blame] | 7316 |  | 
 | 7317 | 	ret = init_hw_breakpoint(); | 
 | 7318 | 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7319 | } | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 7320 |  | 
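 |  | /* Register the pmu bus, create sysfs devices for already-registered PMUs, and mark the bus running. */ | 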
 | 7321 | static int __init perf_event_sysfs_init(void) | 
 | 7322 | { | 
 | 7323 | 	struct pmu *pmu; | 
 | 7324 | 	int ret; | 
 | 7325 |  | 
 | 7326 | 	mutex_lock(&pmus_lock); | 
 | 7327 |  | 
 | 7328 | 	ret = bus_register(&pmu_bus); | 
 | 7329 | 	if (ret) | 
 | 7330 | 		goto unlock; | 
 | 7331 |  | 
 | 7332 | 	list_for_each_entry(pmu, &pmus, entry) { | 
 | 7333 | 		if (!pmu->name || pmu->type < 0) | 
 | 7334 | 			continue; | 
 | 7335 |  | 
 | 7336 | 		ret = pmu_dev_alloc(pmu); | 
 | 7337 | 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); | 
 | 7338 | 	} | 
 | 7339 | 	pmu_bus_running = 1; | 
 | 7340 | 	ret = 0; | 
 | 7341 |  | 
 | 7342 | unlock: | 
 | 7343 | 	mutex_unlock(&pmus_lock); | 
 | 7344 |  | 
 | 7345 | 	return ret; | 
 | 7346 | } | 
 | 7347 | device_initcall(perf_event_sysfs_init); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 7348 |  | 
 | 7349 | #ifdef CONFIG_CGROUP_PERF | 
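 |  | /* cgroup create callback: allocate a perf_cgroup and its per-cpu timing info. */ | 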
 | 7350 | static struct cgroup_subsys_state *perf_cgroup_create( | 
 | 7351 | 	struct cgroup_subsys *ss, struct cgroup *cont) | 
 | 7352 | { | 
 | 7353 | 	struct perf_cgroup *jc; | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 7354 |  | 
| Li Zefan | 1b15d05 | 2011-03-03 14:26:06 +0800 | [diff] [blame] | 7355 | 	jc = kzalloc(sizeof(*jc), GFP_KERNEL); | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 7356 | 	if (!jc) | 
 | 7357 | 		return ERR_PTR(-ENOMEM); | 
 | 7358 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 7359 | 	jc->info = alloc_percpu(struct perf_cgroup_info); | 
 | 7360 | 	if (!jc->info) { | 
 | 7361 | 		kfree(jc); | 
 | 7362 | 		return ERR_PTR(-ENOMEM); | 
 | 7363 | 	} | 
 | 7364 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 7365 | 	return &jc->css; | 
 | 7366 | } | 
 | 7367 |  | 
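 |  | /* cgroup destroy callback: free the per-cpu info and the perf_cgroup itself. */ | 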
 | 7368 | static void perf_cgroup_destroy(struct cgroup_subsys *ss, | 
 | 7369 | 				struct cgroup *cont) | 
 | 7370 | { | 
 | 7371 | 	struct perf_cgroup *jc; | 
 | 7372 | 	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id), | 
 | 7373 | 			  struct perf_cgroup, css); | 
 | 7374 | 	free_percpu(jc->info); | 
 | 7375 | 	kfree(jc); | 
 | 7376 | } | 
 | 7377 |  | 
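 |  | /* Switch a task's cgroup events out and back in, on the CPU the task runs on. */ | 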
 | 7378 | static int __perf_cgroup_move(void *info) | 
 | 7379 | { | 
 | 7380 | 	struct task_struct *task = info; | 
 | 7381 | 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); | 
 | 7382 | 	return 0; | 
 | 7383 | } | 
 | 7384 |  | 
 | 7385 | static void perf_cgroup_move(struct task_struct *task) | 
 | 7386 | { | 
 | 7387 | 	task_function_call(task, __perf_cgroup_move, task); | 
 | 7388 | } | 
 | 7389 |  | 
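 |  | /* cgroup attach callback: move the task (and, for threadgroup attach, every thread in its group) to the new cgroup's events. */ | 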
 | 7390 | static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | 
 | 7391 | 		struct cgroup *old_cgrp, struct task_struct *task, | 
 | 7392 | 		bool threadgroup) | 
 | 7393 | { | 
 | 7394 | 	perf_cgroup_move(task); | 
 | 7395 | 	if (threadgroup) { | 
 | 7396 | 		struct task_struct *c; | 
 | 7397 | 		rcu_read_lock(); | 
 | 7398 | 		list_for_each_entry_rcu(c, &task->thread_group, thread_group) { | 
 | 7399 | 			perf_cgroup_move(c); | 
 | 7400 | 		} | 
 | 7401 | 		rcu_read_unlock(); | 
 | 7402 | 	} | 
 | 7403 | } | 
 | 7404 |  | 
 | 7405 | static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, | 
 | 7406 | 		struct cgroup *old_cgrp, struct task_struct *task) | 
 | 7407 | { | 
 | 7408 | 	/* | 
 | 7409 | 	 * cgroup_exit() is called in the copy_process() failure path. | 
 | 7410 | 	 * Ignore this case since the task hasn't run yet; this avoids | 
 | 7411 | 	 * trying to poke a half-freed task state from generic code. | 
 | 7412 | 	 */ | 
 | 7413 | 	if (!(task->flags & PF_EXITING)) | 
 | 7414 | 		return; | 
 | 7415 |  | 
 | 7416 | 	perf_cgroup_move(task); | 
 | 7417 | } | 
 | 7418 |  | 
 | 7419 | struct cgroup_subsys perf_subsys = { | 
 | 7420 | 	.name = "perf_event", | 
 | 7421 | 	.subsys_id = perf_subsys_id, | 
 | 7422 | 	.create = perf_cgroup_create, | 
 | 7423 | 	.destroy = perf_cgroup_destroy, | 
 | 7424 | 	.exit = perf_cgroup_exit, | 
 | 7425 | 	.attach = perf_cgroup_attach, | 
 | 7426 | }; | 
 | 7427 | #endif /* CONFIG_CGROUP_PERF */ |