/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
static atomic_t nr_task_counters __read_mostly;

/*
 * perf counter paranoia level:
 *  0 - not paranoid
 *  1 - disallow cpu counters for unprivileged users
 *  2 - disallow kernel profiling for unprivileged users
 */
int sysctl_perf_counter_paranoid __read_mostly;

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_counter_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_counter_paranoid > 1;
}

int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf counter sample rate
 */
int sysctl_perf_counter_sample_rate __read_mostly = 100000;

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

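/*
 * perf_disable()/perf_enable() nest via a per-CPU count: the hardware
 * PMU is only re-enabled once the count drops back to zero.
 */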
static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

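/*
 * Context lifetime: contexts are reference counted.  get_ctx() only
 * succeeds on a context that is still live; put_ctx() drops the parent
 * and task references and frees the context via RCU once the last
 * reference is gone.
 */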
static void get_ctx(struct perf_counter_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_counter_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit counters we want to return the parent counter id
 * to userspace.
 */
static u64 primary_counter_id(struct perf_counter *counter)
{
	u64 id = counter->id;

	if (counter->parent)
		id = counter->parent->id;

	return id;
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
	if (counter->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;
	if (counter->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

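/*
 * Stop a single counter on the PMU: record the time it stopped, call
 * into the pmu to disable it and update the context's and the cpu's
 * active/exclusive bookkeeping.
 */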
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (counter->pending_disable) {
		counter->pending_disable = 0;
		counter->state = PERF_COUNTER_STATE_OFF;
	}
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

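/*
 * Start a single counter on the PMU: mark it active, make the new
 * state visible before calling into the pmu, and back out to
 * INACTIVE if the pmu refuses it.
 */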
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

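/*
 * Schedule in a whole group: either the architecture code takes the
 * group as a unit via hw_perf_group_sched_in(), or we add the leader
 * and each sibling individually and undo the partial group on any
 * failure.
 */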
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

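/*
 * Add a counter to its context's lists and initialise its timestamps
 * to the context's current time.
 */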
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added,
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
out:
	spin_unlock_irq(&ctx->lock);
}

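/*
 * Bump the counter's event limit by 'refresh' and (re)enable it.
 */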
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

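/*
 * Schedule out all counters in a context: deactivate the context and,
 * with the PMU disabled, schedule out every active counter or group.
 */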
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_counter_read(void *counter);

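/*
 * Swap the accumulated count and times of a counter with those of its
 * counterpart in the next task's cloned context, so that per-task
 * stats stay attached to the right task across an optimized context
 * switch.
 */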
static void __perf_counter_sync_stat(struct perf_counter *counter,
				     struct perf_counter *next_counter)
{
	u64 value;

	if (!counter->attr.inherit_stat)
		return;

	/*
	 * Update the counter value, we cannot use perf_counter_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the counter must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (counter->state) {
	case PERF_COUNTER_STATE_ACTIVE:
		__perf_counter_read(counter);
		break;

	case PERF_COUNTER_STATE_INACTIVE:
		update_counter_times(counter);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the counter
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_counter->count);
	value = atomic64_xchg(&counter->count, value);
	atomic64_set(&next_counter->count, value);

	swap(counter->total_time_enabled, next_counter->total_time_enabled);
	swap(counter->total_time_running, next_counter->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_counter_update_userpage(counter);
	perf_counter_update_userpage(next_counter);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

1091
1092static void perf_counter_sync_stat(struct perf_counter_context *ctx,
1093 struct perf_counter_context *next_ctx)
1094{
1095 struct perf_counter *counter, *next_counter;
1096
1097 if (!ctx->nr_stat)
1098 return;
1099
1100 counter = list_first_entry(&ctx->event_list,
1101 struct perf_counter, event_entry);
1102
1103 next_counter = list_first_entry(&next_ctx->event_list,
1104 struct perf_counter, event_entry);
1105
1106 while (&counter->event_entry != &ctx->event_list &&
1107 &next_counter->event_entry != &next_ctx->event_list) {
1108
1109 __perf_counter_sync_stat(counter, next_counter);
1110
1111 counter = list_next_entry(counter, event_entry);
Peter Zijlstra10545982009-08-06 18:06:26 +02001112 next_counter = list_next_entry(next_counter, event_entry);
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001113 }
1114}
1115
/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_counter_sync_stat(ctx, next_ctx);
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

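/*
 * Schedule in a context's counters: pinned groups go first so that
 * they get the best chance of being scheduled; the remaining groups
 * are added for as long as the PMU has room.
 */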
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->attr.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);

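/*
 * Move the sample period towards the value that would have produced
 * the requested sample frequency, using a simple low-pass filter to
 * smooth the adjustment.
 */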
static void perf_adjust_period(struct perf_counter *counter, u64 events)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 period, sample_period;
	s64 delta;

	events *= hwc->sample_period;
	period = div64_u64(events, counter->attr.sample_freq);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;
}

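/*
 * Per-tick frequency adjustment: unthrottle counters that hit the
 * interrupt limit and re-tune the sample period of freq-based
 * counters from the number of interrupts seen in the last period.
 */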
static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 interrupts, freq;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		hwc = &counter->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle counters on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
		}

		if (!counter->attr.freq || !counter->attr.sample_freq)
			continue;

		/*
		 * if the specified freq < HZ then we need to skip ticks
		 */
		if (counter->attr.sample_freq < HZ) {
			freq = counter->attr.sample_freq;

			hwc->freq_count += freq;
			hwc->freq_interrupts += interrupts;

			if (hwc->freq_count < HZ)
				continue;

			interrupts = hwc->freq_interrupts;
			hwc->freq_interrupts = 0;
			hwc->freq_count -= HZ;
		} else
			freq = HZ;

		perf_adjust_period(counter, freq * interrupts);

		/*
		 * In order to avoid being stalled by an (accidental) huge
		 * sample period, force reset the sample period if we didn't
		 * get any events in this freq period.
		 */
		if (!interrupts) {
			perf_disable();
			counter->pmu->disable(counter);
			atomic64_set(&hwc->period_left, 0);
			counter->pmu->enable(counter);
			perf_enable();
		}
	}
	spin_unlock(&ctx->lock);
}

Ingo Molnar235c7fc2008-12-21 14:43:25 +01001404/*
1405 * Round-robin a context's counters:
1406 */
1407static void rotate_ctx(struct perf_counter_context *ctx)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001408{
Thomas Gleixner0793a612008-12-04 20:12:29 +01001409 struct perf_counter *counter;
1410
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001411 if (!ctx->nr_counters)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001412 return;
1413
Thomas Gleixner0793a612008-12-04 20:12:29 +01001414 spin_lock(&ctx->lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001415 /*
Ingo Molnar04289bb2008-12-11 08:38:42 +01001416 * Rotate the first entry last (works just fine for group counters too):
Thomas Gleixner0793a612008-12-04 20:12:29 +01001417 */
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001418 perf_disable();
Ingo Molnar04289bb2008-12-11 08:38:42 +01001419 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
Peter Zijlstra75564232009-03-13 12:21:29 +01001420 list_move_tail(&counter->list_entry, &ctx->counter_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001421 break;
1422 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001423 perf_enable();
Thomas Gleixner0793a612008-12-04 20:12:29 +01001424
1425 spin_unlock(&ctx->lock);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001426}
Thomas Gleixner0793a612008-12-04 20:12:29 +01001427
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001428void perf_counter_task_tick(struct task_struct *curr, int cpu)
1429{
Peter Zijlstra7fc23a52009-05-08 18:52:21 +02001430 struct perf_cpu_context *cpuctx;
1431 struct perf_counter_context *ctx;
1432
1433 if (!atomic_read(&nr_counters))
1434 return;
1435
1436 cpuctx = &per_cpu(perf_cpu_context, cpu);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001437 ctx = curr->perf_counter_ctxp;
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001438
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001439 perf_ctx_adjust_freq(&cpuctx->ctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001440 if (ctx)
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001441 perf_ctx_adjust_freq(ctx);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001442
Ingo Molnarb82914c2009-05-04 18:54:32 +02001443 perf_counter_cpu_sched_out(cpuctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001444 if (ctx)
1445 __perf_counter_task_sched_out(ctx);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001446
Ingo Molnarb82914c2009-05-04 18:54:32 +02001447 rotate_ctx(&cpuctx->ctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001448 if (ctx)
1449 rotate_ctx(ctx);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001450
Ingo Molnarb82914c2009-05-04 18:54:32 +02001451 perf_counter_cpu_sched_in(cpuctx, cpu);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001452 if (ctx)
1453 perf_counter_task_sched_in(curr, cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001454}
1455
1456/*
Paul Mackerras57e79862009-06-30 16:07:19 +10001457 * Enable all of a task's counters that have been marked enable-on-exec.
1458 * This expects task == current.
1459 */
1460static void perf_counter_enable_on_exec(struct task_struct *task)
1461{
1462 struct perf_counter_context *ctx;
1463 struct perf_counter *counter;
1464 unsigned long flags;
1465 int enabled = 0;
1466
1467 local_irq_save(flags);
1468 ctx = task->perf_counter_ctxp;
1469 if (!ctx || !ctx->nr_counters)
1470 goto out;
1471
1472 __perf_counter_task_sched_out(ctx);
1473
1474 spin_lock(&ctx->lock);
1475
1476 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1477 if (!counter->attr.enable_on_exec)
1478 continue;
1479 counter->attr.enable_on_exec = 0;
1480 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
1481 continue;
1482 counter->state = PERF_COUNTER_STATE_INACTIVE;
1483 counter->tstamp_enabled =
1484 ctx->time - counter->total_time_enabled;
1485 enabled = 1;
1486 }
1487
1488 /*
1489 * Unclone this context if we enabled any counter.
1490 */
Peter Zijlstra71a851b2009-07-10 09:06:56 +02001491 if (enabled)
1492 unclone_ctx(ctx);
Paul Mackerras57e79862009-06-30 16:07:19 +10001493
1494 spin_unlock(&ctx->lock);
1495
1496 perf_counter_task_sched_in(task, smp_processor_id());
1497 out:
1498 local_irq_restore(flags);
1499}
1500
1501/*
Thomas Gleixner0793a612008-12-04 20:12:29 +01001502 * Cross CPU call to read the hardware counter
1503 */
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001504static void __perf_counter_read(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001505{
Paul Mackerrase1ac3612009-08-14 15:39:10 +10001506 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001507 struct perf_counter *counter = info;
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001508 struct perf_counter_context *ctx = counter->ctx;
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001509 unsigned long flags;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001510
Paul Mackerrase1ac3612009-08-14 15:39:10 +10001511 /*
1512 * If this is a task context, we need to check whether it is
1513 * the current task context of this cpu. If not it has been
1514 * scheduled out before the smp call arrived. In that case
1515 * counter->count would have been updated to a recent sample
1516 * when the counter was scheduled out.
1517 */
1518 if (ctx->task && cpuctx->task_ctx != ctx)
1519 return;
1520
Peter Zijlstra849691a2009-04-06 11:45:12 +02001521 local_irq_save(flags);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001522 if (ctx->is_active)
Peter Zijlstra4af49982009-04-06 11:45:10 +02001523 update_context_time(ctx);
Robert Richter4aeb0b42009-04-29 12:47:03 +02001524 counter->pmu->read(counter);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001525 update_counter_times(counter);
Peter Zijlstra849691a2009-04-06 11:45:12 +02001526 local_irq_restore(flags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001527}
1528
Ingo Molnar04289bb2008-12-11 08:38:42 +01001529static u64 perf_counter_read(struct perf_counter *counter)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001530{
1531 /*
1532 * If counter is enabled and currently active on a CPU, update the
1533 * value in the counter structure:
1534 */
Ingo Molnar6a930702008-12-11 15:17:03 +01001535 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
Thomas Gleixner0793a612008-12-04 20:12:29 +01001536 smp_call_function_single(counter->oncpu,
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001537 __perf_counter_read, counter, 1);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001538 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1539 update_counter_times(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001540 }
1541
Ingo Molnaree060942008-12-13 09:00:03 +01001542 return atomic64_read(&counter->count);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001543}
1544
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001545/*
1546 * Initialize the perf_counter context in a task_struct:
1547 */
1548static void
1549__perf_counter_init_context(struct perf_counter_context *ctx,
1550 struct task_struct *task)
1551{
1552 memset(ctx, 0, sizeof(*ctx));
1553 spin_lock_init(&ctx->lock);
1554 mutex_init(&ctx->mutex);
1555 INIT_LIST_HEAD(&ctx->counter_list);
1556 INIT_LIST_HEAD(&ctx->event_list);
1557 atomic_set(&ctx->refcount, 1);
1558 ctx->task = task;
1559}
1560
Thomas Gleixner0793a612008-12-04 20:12:29 +01001561static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1562{
Ingo Molnar22a4f652009-06-01 10:13:37 +02001563 struct perf_counter_context *ctx;
1564 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001565 struct task_struct *task;
Paul Mackerras25346b92009-06-01 17:48:12 +10001566 unsigned long flags;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001567 int err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001568
1569 /*
1570 * If cpu is not a wildcard then this is a percpu counter:
1571 */
1572 if (cpu != -1) {
1573 /* Must be root to operate on a CPU counter: */
Peter Zijlstra07647712009-06-11 11:18:36 +02001574 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
Thomas Gleixner0793a612008-12-04 20:12:29 +01001575 return ERR_PTR(-EACCES);
1576
1577 if (cpu < 0 || cpu > num_possible_cpus())
1578 return ERR_PTR(-EINVAL);
1579
1580 /*
1581 * We could be clever and allow to attach a counter to an
1582 * offline CPU and activate it when the CPU comes up, but
1583 * that's for later.
1584 */
1585 if (!cpu_isset(cpu, cpu_online_map))
1586 return ERR_PTR(-ENODEV);
1587
1588 cpuctx = &per_cpu(perf_cpu_context, cpu);
1589 ctx = &cpuctx->ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001590 get_ctx(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001591
Thomas Gleixner0793a612008-12-04 20:12:29 +01001592 return ctx;
1593 }
1594
1595 rcu_read_lock();
1596 if (!pid)
1597 task = current;
1598 else
1599 task = find_task_by_vpid(pid);
1600 if (task)
1601 get_task_struct(task);
1602 rcu_read_unlock();
1603
1604 if (!task)
1605 return ERR_PTR(-ESRCH);
1606
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001607 /*
1608 * Can't attach counters to a dying task.
1609 */
1610 err = -ESRCH;
1611 if (task->flags & PF_EXITING)
1612 goto errout;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001613
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001614 /* Reuse ptrace permission checks for now. */
1615 err = -EACCES;
1616 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1617 goto errout;
1618
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001619 retry:
Paul Mackerras25346b92009-06-01 17:48:12 +10001620 ctx = perf_lock_task_context(task, &flags);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001621 if (ctx) {
Peter Zijlstra71a851b2009-07-10 09:06:56 +02001622 unclone_ctx(ctx);
Paul Mackerras25346b92009-06-01 17:48:12 +10001623 spin_unlock_irqrestore(&ctx->lock, flags);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001624 }
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001625
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001626 if (!ctx) {
1627 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001628 err = -ENOMEM;
1629 if (!ctx)
1630 goto errout;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001631 __perf_counter_init_context(ctx, task);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001632 get_ctx(ctx);
1633 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001634 /*
1635 * We raced with some other task; use
1636 * the context they set.
1637 */
1638 kfree(ctx);
Paul Mackerras25346b92009-06-01 17:48:12 +10001639 goto retry;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001640 }
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001641 get_task_struct(task);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001642 }
1643
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001644 put_task_struct(task);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001645 return ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001646
1647 errout:
1648 put_task_struct(task);
1649 return ERR_PTR(err);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001650}
1651
Peter Zijlstra592903c2009-03-13 12:21:36 +01001652static void free_counter_rcu(struct rcu_head *head)
1653{
1654 struct perf_counter *counter;
1655
1656 counter = container_of(head, struct perf_counter, rcu_head);
Peter Zijlstra709e50c2009-06-02 14:13:15 +02001657 if (counter->ns)
1658 put_pid_ns(counter->ns);
Peter Zijlstra592903c2009-03-13 12:21:36 +01001659 kfree(counter);
1660}
1661
Peter Zijlstra925d5192009-03-30 19:07:02 +02001662static void perf_pending_sync(struct perf_counter *counter);
1663
Peter Zijlstraf1600952009-03-19 20:26:16 +01001664static void free_counter(struct perf_counter *counter)
1665{
Peter Zijlstra925d5192009-03-30 19:07:02 +02001666 perf_pending_sync(counter);
1667
Peter Zijlstraf3440112009-06-22 13:58:35 +02001668 if (!counter->parent) {
1669 atomic_dec(&nr_counters);
1670 if (counter->attr.mmap)
1671 atomic_dec(&nr_mmap_counters);
1672 if (counter->attr.comm)
1673 atomic_dec(&nr_comm_counters);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02001674 if (counter->attr.task)
1675 atomic_dec(&nr_task_counters);
Peter Zijlstraf3440112009-06-22 13:58:35 +02001676 }
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02001677
Peter Zijlstrae077df42009-03-19 20:26:17 +01001678 if (counter->destroy)
1679 counter->destroy(counter);
1680
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001681 put_ctx(counter->ctx);
Peter Zijlstraf1600952009-03-19 20:26:16 +01001682 call_rcu(&counter->rcu_head, free_counter_rcu);
1683}
1684
Thomas Gleixner0793a612008-12-04 20:12:29 +01001685/*
1686 * Called when the last reference to the file is gone.
1687 */
1688static int perf_release(struct inode *inode, struct file *file)
1689{
1690 struct perf_counter *counter = file->private_data;
1691 struct perf_counter_context *ctx = counter->ctx;
1692
1693 file->private_data = NULL;
1694
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001695 WARN_ON_ONCE(ctx->parent_ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001696 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001697 perf_counter_remove_from_context(counter);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001698 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001699
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02001700 mutex_lock(&counter->owner->perf_counter_mutex);
1701 list_del_init(&counter->owner_entry);
1702 mutex_unlock(&counter->owner->perf_counter_mutex);
1703 put_task_struct(counter->owner);
1704
Peter Zijlstraf1600952009-03-19 20:26:16 +01001705 free_counter(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001706
1707 return 0;
1708}
1709
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001710static int perf_counter_read_size(struct perf_counter *counter)
1711{
1712 int entry = sizeof(u64); /* value */
1713 int size = 0;
1714 int nr = 1;
1715
1716 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1717 size += sizeof(u64);
1718
1719 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1720 size += sizeof(u64);
1721
1722 if (counter->attr.read_format & PERF_FORMAT_ID)
1723 entry += sizeof(u64);
1724
1725 if (counter->attr.read_format & PERF_FORMAT_GROUP) {
1726 nr += counter->group_leader->nr_siblings;
1727 size += sizeof(u64);
1728 }
1729
1730 size += entry * nr;
1731
1732 return size;
1733}
1734
1735static u64 perf_counter_read_value(struct perf_counter *counter)
Peter Zijlstrae53c0992009-07-24 14:42:10 +02001736{
1737 struct perf_counter *child;
1738 u64 total = 0;
1739
1740 total += perf_counter_read(counter);
1741 list_for_each_entry(child, &counter->child_list, child_list)
1742 total += perf_counter_read(child);
1743
1744 return total;
1745}
1746
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001747static int perf_counter_read_entry(struct perf_counter *counter,
1748 u64 read_format, char __user *buf)
1749{
1750 int n = 0, count = 0;
1751 u64 values[2];
1752
1753 values[n++] = perf_counter_read_value(counter);
1754 if (read_format & PERF_FORMAT_ID)
1755 values[n++] = primary_counter_id(counter);
1756
1757 count = n * sizeof(u64);
1758
1759 if (copy_to_user(buf, values, count))
1760 return -EFAULT;
1761
1762 return count;
1763}
1764
1765static int perf_counter_read_group(struct perf_counter *counter,
1766 u64 read_format, char __user *buf)
1767{
1768 struct perf_counter *leader = counter->group_leader, *sub;
1769 int n = 0, size = 0, err = -EFAULT;
1770 u64 values[3];
1771
1772 values[n++] = 1 + leader->nr_siblings;
1773 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1774 values[n++] = leader->total_time_enabled +
1775 atomic64_read(&leader->child_total_time_enabled);
1776 }
1777 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1778 values[n++] = leader->total_time_running +
1779 atomic64_read(&leader->child_total_time_running);
1780 }
1781
1782 size = n * sizeof(u64);
1783
1784 if (copy_to_user(buf, values, size))
1785 return -EFAULT;
1786
1787 err = perf_counter_read_entry(leader, read_format, buf + size);
1788 if (err < 0)
1789 return err;
1790
1791 size += err;
1792
1793 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
 1794 err = perf_counter_read_entry(sub, read_format,
1795 buf + size);
1796 if (err < 0)
1797 return err;
1798
1799 size += err;
1800 }
1801
1802 return size;
1803}
1804
1805static int perf_counter_read_one(struct perf_counter *counter,
1806 u64 read_format, char __user *buf)
1807{
1808 u64 values[4];
1809 int n = 0;
1810
1811 values[n++] = perf_counter_read_value(counter);
1812 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1813 values[n++] = counter->total_time_enabled +
1814 atomic64_read(&counter->child_total_time_enabled);
1815 }
1816 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1817 values[n++] = counter->total_time_running +
1818 atomic64_read(&counter->child_total_time_running);
1819 }
1820 if (read_format & PERF_FORMAT_ID)
1821 values[n++] = primary_counter_id(counter);
1822
1823 if (copy_to_user(buf, values, n * sizeof(u64)))
1824 return -EFAULT;
1825
1826 return n * sizeof(u64);
1827}
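/*
 * Sketch of the buffer layout the helpers above produce for read() on a
 * counter fd (fields in parentheses only appear when the corresponding
 * bit is set in attr.read_format):
 *
 * without PERF_FORMAT_GROUP:
 *	{ u64 value; (u64 time_enabled;) (u64 time_running;) (u64 id;) }
 *
 * with PERF_FORMAT_GROUP, one entry per group member:
 *	{ u64 nr; (u64 time_enabled;) (u64 time_running;)
 *	  { u64 value; (u64 id;) } cntr[nr]; }
 */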
1828
Thomas Gleixner0793a612008-12-04 20:12:29 +01001829/*
1830 * Read the performance counter - simple non blocking version for now
1831 */
1832static ssize_t
1833perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1834{
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001835 u64 read_format = counter->attr.read_format;
1836 int ret;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001837
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11001838 /*
1839 * Return end-of-file for a read on a counter that is in
1840 * error state (i.e. because it was pinned but it couldn't be
1841 * scheduled on to the CPU at some point).
1842 */
1843 if (counter->state == PERF_COUNTER_STATE_ERROR)
1844 return 0;
1845
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001846 if (count < perf_counter_read_size(counter))
1847 return -ENOSPC;
1848
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001849 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001850 mutex_lock(&counter->child_mutex);
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001851 if (read_format & PERF_FORMAT_GROUP)
1852 ret = perf_counter_read_group(counter, read_format, buf);
1853 else
1854 ret = perf_counter_read_one(counter, read_format, buf);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001855 mutex_unlock(&counter->child_mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001856
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001857 return ret;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001858}
1859
1860static ssize_t
Thomas Gleixner0793a612008-12-04 20:12:29 +01001861perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1862{
1863 struct perf_counter *counter = file->private_data;
1864
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001865 return perf_read_hw(counter, buf, count);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001866}
1867
1868static unsigned int perf_poll(struct file *file, poll_table *wait)
1869{
1870 struct perf_counter *counter = file->private_data;
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001871 struct perf_mmap_data *data;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001872 unsigned int events = POLLHUP;
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001873
1874 rcu_read_lock();
1875 data = rcu_dereference(counter->data);
1876 if (data)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001877 events = atomic_xchg(&data->poll, 0);
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001878 rcu_read_unlock();
Thomas Gleixner0793a612008-12-04 20:12:29 +01001879
1880 poll_wait(file, &counter->waitq, wait);
1881
Thomas Gleixner0793a612008-12-04 20:12:29 +01001882 return events;
1883}
1884
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001885static void perf_counter_reset(struct perf_counter *counter)
1886{
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001887 (void)perf_counter_read(counter);
Paul Mackerras615a3f12009-05-11 15:50:21 +10001888 atomic64_set(&counter->count, 0);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001889 perf_counter_update_userpage(counter);
1890}
1891
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001892/*
1893 * Holding the top-level counter's child_mutex means that any
1894 * descendant process that has inherited this counter will block
1895 * in sync_child_counter if it goes to exit, thus satisfying the
1896 * task existence requirements of perf_counter_enable/disable.
1897 */
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001898static void perf_counter_for_each_child(struct perf_counter *counter,
1899 void (*func)(struct perf_counter *))
1900{
1901 struct perf_counter *child;
1902
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001903 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001904 mutex_lock(&counter->child_mutex);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001905 func(counter);
1906 list_for_each_entry(child, &counter->child_list, child_list)
1907 func(child);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001908 mutex_unlock(&counter->child_mutex);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001909}
1910
1911static void perf_counter_for_each(struct perf_counter *counter,
1912 void (*func)(struct perf_counter *))
1913{
Peter Zijlstra75f937f2009-06-15 15:05:12 +02001914 struct perf_counter_context *ctx = counter->ctx;
1915 struct perf_counter *sibling;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001916
Peter Zijlstra75f937f2009-06-15 15:05:12 +02001917 WARN_ON_ONCE(ctx->parent_ctx);
1918 mutex_lock(&ctx->mutex);
1919 counter = counter->group_leader;
1920
1921 perf_counter_for_each_child(counter, func);
1922 func(counter);
1923 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1924 perf_counter_for_each_child(counter, func);
1925 mutex_unlock(&ctx->mutex);
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001926}
1927
Peter Zijlstra08247e32009-06-02 16:46:57 +02001928static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1929{
1930 struct perf_counter_context *ctx = counter->ctx;
1931 unsigned long size;
1932 int ret = 0;
1933 u64 value;
1934
Peter Zijlstra0d486962009-06-02 19:22:16 +02001935 if (!counter->attr.sample_period)
Peter Zijlstra08247e32009-06-02 16:46:57 +02001936 return -EINVAL;
1937
1938 size = copy_from_user(&value, arg, sizeof(value));
 1939 if (size != 0)
1940 return -EFAULT;
1941
1942 if (!value)
1943 return -EINVAL;
1944
1945 spin_lock_irq(&ctx->lock);
Peter Zijlstra0d486962009-06-02 19:22:16 +02001946 if (counter->attr.freq) {
Peter Zijlstradf58ab22009-06-11 11:25:05 +02001947 if (value > sysctl_perf_counter_sample_rate) {
Peter Zijlstra08247e32009-06-02 16:46:57 +02001948 ret = -EINVAL;
1949 goto unlock;
1950 }
1951
Peter Zijlstra0d486962009-06-02 19:22:16 +02001952 counter->attr.sample_freq = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001953 } else {
Peter Zijlstra0d486962009-06-02 19:22:16 +02001954 counter->attr.sample_period = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001955 counter->hw.sample_period = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001956 }
1957unlock:
1958 spin_unlock_irq(&ctx->lock);
1959
1960 return ret;
1961}
1962
Paul Mackerrasd859e292009-01-17 18:10:22 +11001963static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1964{
1965 struct perf_counter *counter = file->private_data;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001966 void (*func)(struct perf_counter *);
1967 u32 flags = arg;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001968
1969 switch (cmd) {
1970 case PERF_COUNTER_IOC_ENABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001971 func = perf_counter_enable;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001972 break;
1973 case PERF_COUNTER_IOC_DISABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001974 func = perf_counter_disable;
Peter Zijlstra79f14642009-04-06 11:45:07 +02001975 break;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001976 case PERF_COUNTER_IOC_RESET:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001977 func = perf_counter_reset;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001978 break;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001979
1980 case PERF_COUNTER_IOC_REFRESH:
1981 return perf_counter_refresh(counter, arg);
Peter Zijlstra08247e32009-06-02 16:46:57 +02001982
1983 case PERF_COUNTER_IOC_PERIOD:
1984 return perf_counter_period(counter, (u64 __user *)arg);
1985
Paul Mackerrasd859e292009-01-17 18:10:22 +11001986 default:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001987 return -ENOTTY;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001988 }
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001989
1990 if (flags & PERF_IOC_FLAG_GROUP)
1991 perf_counter_for_each(counter, func);
1992 else
1993 perf_counter_for_each_child(counter, func);
1994
1995 return 0;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001996}
1997
Peter Zijlstra771d7cd2009-05-25 14:45:26 +02001998int perf_counter_task_enable(void)
1999{
2000 struct perf_counter *counter;
2001
2002 mutex_lock(&current->perf_counter_mutex);
2003 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
2004 perf_counter_for_each_child(counter, perf_counter_enable);
2005 mutex_unlock(&current->perf_counter_mutex);
2006
2007 return 0;
2008}
2009
2010int perf_counter_task_disable(void)
2011{
2012 struct perf_counter *counter;
2013
2014 mutex_lock(&current->perf_counter_mutex);
2015 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
2016 perf_counter_for_each_child(counter, perf_counter_disable);
2017 mutex_unlock(&current->perf_counter_mutex);
2018
2019 return 0;
2020}
2021
Peter Zijlstra194002b2009-06-22 16:35:24 +02002022static int perf_counter_index(struct perf_counter *counter)
2023{
2024 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2025 return 0;
2026
2027 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
2028}
2029
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002030/*
2031 * Callers need to ensure there can be no nesting of this function, otherwise
2032 * the seqlock logic goes bad. We can not serialize this because the arch
2033 * code calls this from NMI context.
2034 */
2035void perf_counter_update_userpage(struct perf_counter *counter)
Paul Mackerras37d81822009-03-23 18:22:08 +01002036{
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002037 struct perf_counter_mmap_page *userpg;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002038 struct perf_mmap_data *data;
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002039
2040 rcu_read_lock();
2041 data = rcu_dereference(counter->data);
2042 if (!data)
2043 goto unlock;
2044
2045 userpg = data->user_page;
Paul Mackerras37d81822009-03-23 18:22:08 +01002046
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002047 /*
2048 * Disable preemption so as to not let the corresponding user-space
2049 * spin too long if we get preempted.
2050 */
2051 preempt_disable();
Paul Mackerras37d81822009-03-23 18:22:08 +01002052 ++userpg->lock;
Peter Zijlstra92f22a32009-04-02 11:12:04 +02002053 barrier();
Peter Zijlstra194002b2009-06-22 16:35:24 +02002054 userpg->index = perf_counter_index(counter);
Paul Mackerras37d81822009-03-23 18:22:08 +01002055 userpg->offset = atomic64_read(&counter->count);
2056 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
2057 userpg->offset -= atomic64_read(&counter->hw.prev_count);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002058
Peter Zijlstra7f8b4e42009-06-22 14:34:35 +02002059 userpg->time_enabled = counter->total_time_enabled +
2060 atomic64_read(&counter->child_total_time_enabled);
2061
2062 userpg->time_running = counter->total_time_running +
2063 atomic64_read(&counter->child_total_time_running);
2064
Peter Zijlstra92f22a32009-04-02 11:12:04 +02002065 barrier();
Paul Mackerras37d81822009-03-23 18:22:08 +01002066 ++userpg->lock;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002067 preempt_enable();
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002068unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002069 rcu_read_unlock();
Paul Mackerras37d81822009-03-23 18:22:08 +01002070}
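/*
 * A minimal sketch of the matching user-space side (assumed usage, not a
 * verbatim copy of any tool): the reader pairs with the lock/unlock
 * sequence above like a seqcount, and read_pmc() stands for whatever
 * instruction reads hardware counter n directly (e.g. RDPMC on x86):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *
 *		index = pc->index;
 *		count = pc->offset;
 *		if (index)
 *			count += read_pmc(index - 1);
 *
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * When index is 0 the counter is not currently active on this CPU and the
 * value has to be obtained with a regular read() instead.
 */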
2071
2072static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2073{
2074 struct perf_counter *counter = vma->vm_file->private_data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002075 struct perf_mmap_data *data;
2076 int ret = VM_FAULT_SIGBUS;
Paul Mackerras37d81822009-03-23 18:22:08 +01002077
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002078 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2079 if (vmf->pgoff == 0)
2080 ret = 0;
2081 return ret;
2082 }
2083
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002084 rcu_read_lock();
2085 data = rcu_dereference(counter->data);
2086 if (!data)
2087 goto unlock;
Paul Mackerras37d81822009-03-23 18:22:08 +01002088
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002089 if (vmf->pgoff == 0) {
2090 vmf->page = virt_to_page(data->user_page);
2091 } else {
2092 int nr = vmf->pgoff - 1;
2093
 2094 if ((unsigned)nr >= data->nr_pages)
2095 goto unlock;
2096
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002097 if (vmf->flags & FAULT_FLAG_WRITE)
2098 goto unlock;
2099
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002100 vmf->page = virt_to_page(data->data_pages[nr]);
2101 }
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002102
Paul Mackerras37d81822009-03-23 18:22:08 +01002103 get_page(vmf->page);
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002104 vmf->page->mapping = vma->vm_file->f_mapping;
2105 vmf->page->index = vmf->pgoff;
2106
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002107 ret = 0;
2108unlock:
2109 rcu_read_unlock();
2110
2111 return ret;
2112}
2113
2114static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2115{
2116 struct perf_mmap_data *data;
2117 unsigned long size;
2118 int i;
2119
2120 WARN_ON(atomic_read(&counter->mmap_count));
2121
2122 size = sizeof(struct perf_mmap_data);
2123 size += nr_pages * sizeof(void *);
2124
2125 data = kzalloc(size, GFP_KERNEL);
2126 if (!data)
2127 goto fail;
2128
2129 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2130 if (!data->user_page)
2131 goto fail_user_page;
2132
2133 for (i = 0; i < nr_pages; i++) {
2134 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2135 if (!data->data_pages[i])
2136 goto fail_data_pages;
2137 }
2138
2139 data->nr_pages = nr_pages;
Peter Zijlstra22c15582009-05-05 17:50:25 +02002140 atomic_set(&data->lock, -1);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002141
2142 rcu_assign_pointer(counter->data, data);
2143
Paul Mackerras37d81822009-03-23 18:22:08 +01002144 return 0;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002145
2146fail_data_pages:
2147 for (i--; i >= 0; i--)
2148 free_page((unsigned long)data->data_pages[i]);
2149
2150 free_page((unsigned long)data->user_page);
2151
2152fail_user_page:
2153 kfree(data);
2154
2155fail:
2156 return -ENOMEM;
2157}
2158
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002159static void perf_mmap_free_page(unsigned long addr)
2160{
Kevin Cernekee5bfd7562009-07-05 12:08:19 -07002161 struct page *page = virt_to_page((void *)addr);
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002162
2163 page->mapping = NULL;
2164 __free_page(page);
2165}
2166
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002167static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2168{
Ingo Molnar22a4f652009-06-01 10:13:37 +02002169 struct perf_mmap_data *data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002170 int i;
2171
Ingo Molnar22a4f652009-06-01 10:13:37 +02002172 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2173
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002174 perf_mmap_free_page((unsigned long)data->user_page);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002175 for (i = 0; i < data->nr_pages; i++)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002176 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2177
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002178 kfree(data);
2179}
2180
2181static void perf_mmap_data_free(struct perf_counter *counter)
2182{
2183 struct perf_mmap_data *data = counter->data;
2184
2185 WARN_ON(atomic_read(&counter->mmap_count));
2186
2187 rcu_assign_pointer(counter->data, NULL);
2188 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2189}
2190
2191static void perf_mmap_open(struct vm_area_struct *vma)
2192{
2193 struct perf_counter *counter = vma->vm_file->private_data;
2194
2195 atomic_inc(&counter->mmap_count);
2196}
2197
2198static void perf_mmap_close(struct vm_area_struct *vma)
2199{
2200 struct perf_counter *counter = vma->vm_file->private_data;
2201
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10002202 WARN_ON_ONCE(counter->ctx->parent_ctx);
Ingo Molnar22a4f652009-06-01 10:13:37 +02002203 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002204 struct user_struct *user = current_user();
2205
2206 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002207 vma->vm_mm->locked_vm -= counter->data->nr_locked;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002208 perf_mmap_data_free(counter);
2209 mutex_unlock(&counter->mmap_mutex);
2210 }
Paul Mackerras37d81822009-03-23 18:22:08 +01002211}
2212
2213static struct vm_operations_struct perf_mmap_vmops = {
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002214 .open = perf_mmap_open,
2215 .close = perf_mmap_close,
2216 .fault = perf_mmap_fault,
2217 .page_mkwrite = perf_mmap_fault,
Paul Mackerras37d81822009-03-23 18:22:08 +01002218};
2219
2220static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2221{
2222 struct perf_counter *counter = file->private_data;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002223 unsigned long user_locked, user_lock_limit;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002224 struct user_struct *user = current_user();
Ingo Molnar22a4f652009-06-01 10:13:37 +02002225 unsigned long locked, lock_limit;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002226 unsigned long vma_size;
2227 unsigned long nr_pages;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002228 long user_extra, extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002229 int ret = 0;
Paul Mackerras37d81822009-03-23 18:22:08 +01002230
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002231 if (!(vma->vm_flags & VM_SHARED))
Paul Mackerras37d81822009-03-23 18:22:08 +01002232 return -EINVAL;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002233
2234 vma_size = vma->vm_end - vma->vm_start;
2235 nr_pages = (vma_size / PAGE_SIZE) - 1;
2236
Peter Zijlstra7730d862009-03-25 12:48:31 +01002237 /*
2238 * If we have data pages ensure they're a power-of-two number, so we
2239 * can do bitmasks instead of modulo.
2240 */
2241 if (nr_pages != 0 && !is_power_of_2(nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01002242 return -EINVAL;
2243
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002244 if (vma_size != PAGE_SIZE * (1 + nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01002245 return -EINVAL;
2246
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002247 if (vma->vm_pgoff != 0)
2248 return -EINVAL;
Paul Mackerras37d81822009-03-23 18:22:08 +01002249
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10002250 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002251 mutex_lock(&counter->mmap_mutex);
2252 if (atomic_inc_not_zero(&counter->mmap_count)) {
2253 if (nr_pages != counter->data->nr_pages)
2254 ret = -EINVAL;
2255 goto unlock;
2256 }
2257
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002258 user_extra = nr_pages + 1;
2259 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
Ingo Molnara3862d32009-05-24 09:02:37 +02002260
2261 /*
2262 * Increase the limit linearly with more CPUs:
2263 */
2264 user_lock_limit *= num_online_cpus();
2265
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002266 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002267
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002268 extra = 0;
2269 if (user_locked > user_lock_limit)
2270 extra = user_locked - user_lock_limit;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002271
2272 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2273 lock_limit >>= PAGE_SHIFT;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002274 locked = vma->vm_mm->locked_vm + extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002275
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002276 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
2277 ret = -EPERM;
2278 goto unlock;
2279 }
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002280
2281 WARN_ON(counter->data);
2282 ret = perf_mmap_data_alloc(counter, nr_pages);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002283 if (ret)
2284 goto unlock;
2285
2286 atomic_set(&counter->mmap_count, 1);
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002287 atomic_long_add(user_extra, &user->locked_vm);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002288 vma->vm_mm->locked_vm += extra;
2289 counter->data->nr_locked = extra;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002290 if (vma->vm_flags & VM_WRITE)
2291 counter->data->writable = 1;
2292
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002293unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002294 mutex_unlock(&counter->mmap_mutex);
Paul Mackerras37d81822009-03-23 18:22:08 +01002295
Paul Mackerras37d81822009-03-23 18:22:08 +01002296 vma->vm_flags |= VM_RESERVED;
2297 vma->vm_ops = &perf_mmap_vmops;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002298
2299 return ret;
Paul Mackerras37d81822009-03-23 18:22:08 +01002300}
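/*
 * Rough user-space usage this path expects (sketch only; error handling
 * and the exact syscall wrapper name are illustrative): one control page
 * followed by 2^n data pages, mapped shared, and writable if the task
 * wants to update data_tail:
 *
 *	fd   = sys_perf_counter_open(&attr, pid, cpu, -1, 0);
 *	base = mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	pc   = base;                  control page, perf_counter_mmap_page
 *	data = base + page_size;      8 pages of event data
 */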
2301
Peter Zijlstra3c446b32009-04-06 11:45:01 +02002302static int perf_fasync(int fd, struct file *filp, int on)
2303{
Peter Zijlstra3c446b32009-04-06 11:45:01 +02002304 struct inode *inode = filp->f_path.dentry->d_inode;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002305 struct perf_counter *counter = filp->private_data;
Peter Zijlstra3c446b32009-04-06 11:45:01 +02002306 int retval;
2307
2308 mutex_lock(&inode->i_mutex);
2309 retval = fasync_helper(fd, filp, on, &counter->fasync);
2310 mutex_unlock(&inode->i_mutex);
2311
2312 if (retval < 0)
2313 return retval;
2314
2315 return 0;
2316}
2317
Thomas Gleixner0793a612008-12-04 20:12:29 +01002318static const struct file_operations perf_fops = {
2319 .release = perf_release,
2320 .read = perf_read,
2321 .poll = perf_poll,
Paul Mackerrasd859e292009-01-17 18:10:22 +11002322 .unlocked_ioctl = perf_ioctl,
2323 .compat_ioctl = perf_ioctl,
Paul Mackerras37d81822009-03-23 18:22:08 +01002324 .mmap = perf_mmap,
Peter Zijlstra3c446b32009-04-06 11:45:01 +02002325 .fasync = perf_fasync,
Thomas Gleixner0793a612008-12-04 20:12:29 +01002326};
2327
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002328/*
Peter Zijlstra925d5192009-03-30 19:07:02 +02002329 * Perf counter wakeup
2330 *
2331 * If there's data, ensure we set the poll() state and publish everything
2332 * to user-space before waking everybody up.
2333 */
2334
2335void perf_counter_wakeup(struct perf_counter *counter)
2336{
Peter Zijlstra925d5192009-03-30 19:07:02 +02002337 wake_up_all(&counter->waitq);
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002338
2339 if (counter->pending_kill) {
2340 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2341 counter->pending_kill = 0;
2342 }
Peter Zijlstra925d5192009-03-30 19:07:02 +02002343}
2344
2345/*
2346 * Pending wakeups
2347 *
 2348 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2349 *
2350 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2351 * single linked list and use cmpxchg() to add entries lockless.
2352 */
2353
Peter Zijlstra79f14642009-04-06 11:45:07 +02002354static void perf_pending_counter(struct perf_pending_entry *entry)
2355{
2356 struct perf_counter *counter = container_of(entry,
2357 struct perf_counter, pending);
2358
2359 if (counter->pending_disable) {
2360 counter->pending_disable = 0;
Peter Zijlstra970892a2009-08-13 11:47:54 +02002361 __perf_counter_disable(counter);
Peter Zijlstra79f14642009-04-06 11:45:07 +02002362 }
2363
2364 if (counter->pending_wakeup) {
2365 counter->pending_wakeup = 0;
2366 perf_counter_wakeup(counter);
2367 }
2368}
2369
Peter Zijlstra671dec52009-04-06 11:45:02 +02002370#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
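/*
 * The list is terminated with PENDING_TAIL rather than NULL so that
 * entry->next == NULL keeps meaning "not queued": perf_pending_queue()
 * claims an entry with cmpxchg(&entry->next, NULL, PENDING_TAIL), and
 * perf_not_pending() tests for NULL to see whether __perf_pending_run()
 * has processed (and thereby released) the entry again.
 */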
Peter Zijlstra925d5192009-03-30 19:07:02 +02002371
Peter Zijlstra671dec52009-04-06 11:45:02 +02002372static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
Peter Zijlstra925d5192009-03-30 19:07:02 +02002373 PENDING_TAIL,
2374};
2375
Peter Zijlstra671dec52009-04-06 11:45:02 +02002376static void perf_pending_queue(struct perf_pending_entry *entry,
2377 void (*func)(struct perf_pending_entry *))
Peter Zijlstra925d5192009-03-30 19:07:02 +02002378{
Peter Zijlstra671dec52009-04-06 11:45:02 +02002379 struct perf_pending_entry **head;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002380
Peter Zijlstra671dec52009-04-06 11:45:02 +02002381 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02002382 return;
2383
Peter Zijlstra671dec52009-04-06 11:45:02 +02002384 entry->func = func;
2385
2386 head = &get_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002387
2388 do {
Peter Zijlstra671dec52009-04-06 11:45:02 +02002389 entry->next = *head;
2390 } while (cmpxchg(head, entry->next, entry) != entry->next);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002391
2392 set_perf_counter_pending();
2393
Peter Zijlstra671dec52009-04-06 11:45:02 +02002394 put_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002395}
2396
2397static int __perf_pending_run(void)
2398{
Peter Zijlstra671dec52009-04-06 11:45:02 +02002399 struct perf_pending_entry *list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002400 int nr = 0;
2401
Peter Zijlstra671dec52009-04-06 11:45:02 +02002402 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002403 while (list != PENDING_TAIL) {
Peter Zijlstra671dec52009-04-06 11:45:02 +02002404 void (*func)(struct perf_pending_entry *);
2405 struct perf_pending_entry *entry = list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002406
2407 list = list->next;
2408
Peter Zijlstra671dec52009-04-06 11:45:02 +02002409 func = entry->func;
2410 entry->next = NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002411 /*
2412 * Ensure we observe the unqueue before we issue the wakeup,
2413 * so that we won't be waiting forever.
2414 * -- see perf_not_pending().
2415 */
2416 smp_wmb();
2417
Peter Zijlstra671dec52009-04-06 11:45:02 +02002418 func(entry);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002419 nr++;
2420 }
2421
2422 return nr;
2423}
2424
2425static inline int perf_not_pending(struct perf_counter *counter)
2426{
2427 /*
2428 * If we flush on whatever cpu we run, there is a chance we don't
2429 * need to wait.
2430 */
2431 get_cpu();
2432 __perf_pending_run();
2433 put_cpu();
2434
2435 /*
2436 * Ensure we see the proper queue state before going to sleep
 2437 * so that we do not miss the wakeup. -- see __perf_pending_run()
2438 */
2439 smp_rmb();
Peter Zijlstra671dec52009-04-06 11:45:02 +02002440 return counter->pending.next == NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002441}
2442
2443static void perf_pending_sync(struct perf_counter *counter)
2444{
2445 wait_event(counter->waitq, perf_not_pending(counter));
2446}
2447
2448void perf_counter_do_pending(void)
2449{
2450 __perf_pending_run();
2451}
2452
2453/*
Peter Zijlstra394ee072009-03-30 19:07:14 +02002454 * Callchain support -- arch specific
2455 */
2456
Peter Zijlstra9c03d882009-04-06 11:45:00 +02002457__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
Peter Zijlstra394ee072009-03-30 19:07:14 +02002458{
2459 return NULL;
2460}
2461
2462/*
Peter Zijlstra0322cd62009-03-19 20:26:19 +01002463 * Output
2464 */
2465
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002466struct perf_output_handle {
2467 struct perf_counter *counter;
2468 struct perf_mmap_data *data;
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002469 unsigned long head;
2470 unsigned long offset;
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002471 int nmi;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002472 int sample;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002473 int locked;
2474 unsigned long flags;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002475};
2476
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002477static bool perf_output_space(struct perf_mmap_data *data,
2478 unsigned int offset, unsigned int head)
2479{
2480 unsigned long tail;
2481 unsigned long mask;
2482
2483 if (!data->writable)
2484 return true;
2485
2486 mask = (data->nr_pages << PAGE_SHIFT) - 1;
2487 /*
 2488 * Userspace could choose to issue an mb() before updating the tail
 2489 * pointer, so that all reads will be completed before the write is
 2490 * issued.
2491 */
2492 tail = ACCESS_ONCE(data->user_page->data_tail);
2493 smp_rmb();
2494
2495 offset = (offset - tail) & mask;
2496 head = (head - tail) & mask;
2497
2498 if ((int)(head - offset) < 0)
2499 return false;
2500
2501 return true;
2502}
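/*
 * Worked example of the check above (illustrative numbers): with a single
 * data page (mask = 4095), data_tail = 100, offset = 4000 and a head of
 * 4200 for a 200 byte record, the masked values become offset = 3900 and
 * head = 4.  head - offset is negative, so there is no room: only
 * 4096 - 3900 = 196 bytes are free before the write would overwrite data
 * user space has not consumed yet.
 */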
2503
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002504static void perf_output_wakeup(struct perf_output_handle *handle)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002505{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002506 atomic_set(&handle->data->poll, POLL_IN);
2507
Peter Zijlstra671dec52009-04-06 11:45:02 +02002508 if (handle->nmi) {
Peter Zijlstra79f14642009-04-06 11:45:07 +02002509 handle->counter->pending_wakeup = 1;
Peter Zijlstra671dec52009-04-06 11:45:02 +02002510 perf_pending_queue(&handle->counter->pending,
Peter Zijlstra79f14642009-04-06 11:45:07 +02002511 perf_pending_counter);
Peter Zijlstra671dec52009-04-06 11:45:02 +02002512 } else
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002513 perf_counter_wakeup(handle->counter);
2514}
2515
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002516/*
2517 * Curious locking construct.
2518 *
2519 * We need to ensure a later event doesn't publish a head when a former
2520 * event isn't done writing. However since we need to deal with NMIs we
2521 * cannot fully serialize things.
2522 *
2523 * What we do is serialize between CPUs so we only have to deal with NMI
2524 * nesting on a single CPU.
2525 *
2526 * We only publish the head (and generate a wakeup) when the outer-most
2527 * event completes.
2528 */
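/*
 * Concrete nesting scenario (an informal reading of the code below): a
 * writer takes data->lock in perf_output_lock() and starts writing; an
 * NMI on the same CPU starts a second output, finds data->lock already
 * set to this CPU and returns with handle->locked = 0.  The nested
 * perf_output_unlock() then only records data->done_head; the head is
 * actually published, and the wakeup issued, when the interrupted
 * outer-most writer unlocks.
 */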
2529static void perf_output_lock(struct perf_output_handle *handle)
2530{
2531 struct perf_mmap_data *data = handle->data;
2532 int cpu;
2533
2534 handle->locked = 0;
2535
2536 local_irq_save(handle->flags);
2537 cpu = smp_processor_id();
2538
2539 if (in_nmi() && atomic_read(&data->lock) == cpu)
2540 return;
2541
Peter Zijlstra22c15582009-05-05 17:50:25 +02002542 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002543 cpu_relax();
2544
2545 handle->locked = 1;
2546}
2547
2548static void perf_output_unlock(struct perf_output_handle *handle)
2549{
2550 struct perf_mmap_data *data = handle->data;
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002551 unsigned long head;
2552 int cpu;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002553
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002554 data->done_head = data->head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002555
2556 if (!handle->locked)
2557 goto out;
2558
2559again:
2560 /*
2561 * The xchg implies a full barrier that ensures all writes are done
2562 * before we publish the new head, matched by a rmb() in userspace when
2563 * reading this position.
2564 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002565 while ((head = atomic_long_xchg(&data->done_head, 0)))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002566 data->user_page->data_head = head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002567
2568 /*
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002569 * NMI can happen here, which means we can miss a done_head update.
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002570 */
2571
Peter Zijlstra22c15582009-05-05 17:50:25 +02002572 cpu = atomic_xchg(&data->lock, -1);
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002573 WARN_ON_ONCE(cpu != smp_processor_id());
2574
2575 /*
 2576 * Therefore we have to check whether we did indeed miss an update.
2577 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002578 if (unlikely(atomic_long_read(&data->done_head))) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002579 /*
2580 * Since we had it locked, we can lock it again.
2581 */
Peter Zijlstra22c15582009-05-05 17:50:25 +02002582 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002583 cpu_relax();
2584
2585 goto again;
2586 }
2587
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002588 if (atomic_xchg(&data->wakeup, 0))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002589 perf_output_wakeup(handle);
2590out:
2591 local_irq_restore(handle->flags);
2592}
2593
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002594static void perf_output_copy(struct perf_output_handle *handle,
Peter Zijlstra089dd792009-06-05 14:04:55 +02002595 const void *buf, unsigned int len)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002596{
2597 unsigned int pages_mask;
2598 unsigned int offset;
2599 unsigned int size;
2600 void **pages;
2601
2602 offset = handle->offset;
2603 pages_mask = handle->data->nr_pages - 1;
2604 pages = handle->data->data_pages;
2605
2606 do {
2607 unsigned int page_offset;
2608 int nr;
2609
2610 nr = (offset >> PAGE_SHIFT) & pages_mask;
2611 page_offset = offset & (PAGE_SIZE - 1);
2612 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2613
2614 memcpy(pages[nr] + page_offset, buf, size);
2615
2616 len -= size;
2617 buf += size;
2618 offset += size;
2619 } while (len);
2620
2621 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01002622
Peter Zijlstra53020fe2009-05-13 21:26:19 +02002623 /*
2624 * Check we didn't copy past our reservation window, taking the
2625 * possible unsigned int wrap into account.
2626 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002627 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002628}
2629
Peter Zijlstra5c148192009-03-25 12:30:23 +01002630#define perf_output_put(handle, x) \
2631 perf_output_copy((handle), &(x), sizeof(x))
2632
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002633static int perf_output_begin(struct perf_output_handle *handle,
2634 struct perf_counter *counter, unsigned int size,
2635 int nmi, int sample)
2636{
2637 struct perf_mmap_data *data;
2638 unsigned int offset, head;
2639 int have_lost;
2640 struct {
2641 struct perf_event_header header;
2642 u64 id;
2643 u64 lost;
2644 } lost_event;
2645
2646 /*
2647 * For inherited counters we send all the output towards the parent.
2648 */
2649 if (counter->parent)
2650 counter = counter->parent;
2651
2652 rcu_read_lock();
2653 data = rcu_dereference(counter->data);
2654 if (!data)
2655 goto out;
2656
2657 handle->data = data;
2658 handle->counter = counter;
2659 handle->nmi = nmi;
2660 handle->sample = sample;
2661
2662 if (!data->nr_pages)
2663 goto fail;
2664
2665 have_lost = atomic_read(&data->lost);
2666 if (have_lost)
2667 size += sizeof(lost_event);
2668
2669 perf_output_lock(handle);
2670
2671 do {
2672 offset = head = atomic_long_read(&data->head);
2673 head += size;
2674 if (unlikely(!perf_output_space(data, offset, head)))
2675 goto fail;
2676 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2677
2678 handle->offset = offset;
2679 handle->head = head;
2680
2681 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2682 atomic_set(&data->wakeup, 1);
2683
2684 if (have_lost) {
2685 lost_event.header.type = PERF_EVENT_LOST;
2686 lost_event.header.misc = 0;
2687 lost_event.header.size = sizeof(lost_event);
2688 lost_event.id = counter->id;
2689 lost_event.lost = atomic_xchg(&data->lost, 0);
2690
2691 perf_output_put(handle, lost_event);
2692 }
2693
2694 return 0;
2695
2696fail:
2697 atomic_inc(&data->lost);
2698 perf_output_unlock(handle);
2699out:
2700 rcu_read_unlock();
2701
2702 return -ENOSPC;
2703}
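/*
 * Informal summary of what perf_output_begin() above does: writers race
 * with a cmpxchg loop on data->head to carve [offset, head) out of the
 * buffer for themselves, records that do not fit are counted in
 * data->lost, and the next record that does fit is preceded by a
 * PERF_EVENT_LOST record carrying that count.
 */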
2704
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002705static void perf_output_end(struct perf_output_handle *handle)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002706{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002707 struct perf_counter *counter = handle->counter;
2708 struct perf_mmap_data *data = handle->data;
2709
Peter Zijlstra0d486962009-06-02 19:22:16 +02002710 int wakeup_events = counter->attr.wakeup_events;
Peter Zijlstrac4578102009-04-02 11:12:01 +02002711
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002712 if (handle->sample && wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002713 int events = atomic_inc_return(&data->events);
Peter Zijlstrac4578102009-04-02 11:12:01 +02002714 if (events >= wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002715 atomic_sub(wakeup_events, &data->events);
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002716 atomic_set(&data->wakeup, 1);
Peter Zijlstrac4578102009-04-02 11:12:01 +02002717 }
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002718 }
2719
2720 perf_output_unlock(handle);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002721 rcu_read_unlock();
2722}
2723
Peter Zijlstra709e50c2009-06-02 14:13:15 +02002724static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2725{
2726 /*
2727 * only top level counters have the pid namespace they were created in
2728 */
2729 if (counter->parent)
2730 counter = counter->parent;
2731
2732 return task_tgid_nr_ns(p, counter->ns);
2733}
2734
2735static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2736{
2737 /*
2738 * only top level counters have the pid namespace they were created in
2739 */
2740 if (counter->parent)
2741 counter = counter->parent;
2742
2743 return task_pid_nr_ns(p, counter->ns);
2744}
2745
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002746static void perf_output_read_one(struct perf_output_handle *handle,
2747 struct perf_counter *counter)
2748{
2749 u64 read_format = counter->attr.read_format;
2750 u64 values[4];
2751 int n = 0;
2752
2753 values[n++] = atomic64_read(&counter->count);
2754 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2755 values[n++] = counter->total_time_enabled +
2756 atomic64_read(&counter->child_total_time_enabled);
2757 }
2758 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2759 values[n++] = counter->total_time_running +
2760 atomic64_read(&counter->child_total_time_running);
2761 }
2762 if (read_format & PERF_FORMAT_ID)
2763 values[n++] = primary_counter_id(counter);
2764
2765 perf_output_copy(handle, values, n * sizeof(u64));
2766}
2767
2768/*
2769 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
2770 */
2771static void perf_output_read_group(struct perf_output_handle *handle,
2772 struct perf_counter *counter)
2773{
2774 struct perf_counter *leader = counter->group_leader, *sub;
2775 u64 read_format = counter->attr.read_format;
2776 u64 values[5];
2777 int n = 0;
2778
2779 values[n++] = 1 + leader->nr_siblings;
2780
2781 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2782 values[n++] = leader->total_time_enabled;
2783
2784 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2785 values[n++] = leader->total_time_running;
2786
2787 if (leader != counter)
2788 leader->pmu->read(leader);
2789
2790 values[n++] = atomic64_read(&leader->count);
2791 if (read_format & PERF_FORMAT_ID)
2792 values[n++] = primary_counter_id(leader);
2793
2794 perf_output_copy(handle, values, n * sizeof(u64));
2795
2796 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2797 n = 0;
2798
2799 if (sub != counter)
2800 sub->pmu->read(sub);
2801
2802 values[n++] = atomic64_read(&sub->count);
2803 if (read_format & PERF_FORMAT_ID)
2804 values[n++] = primary_counter_id(sub);
2805
2806 perf_output_copy(handle, values, n * sizeof(u64));
2807 }
2808}
2809
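/*
 * Illustrative sketch (not part of the original source): with
 * PERF_FORMAT_GROUP the function above emits the member count, the
 * optional leader times, then one entry for the leader followed by one
 * entry per sibling.  Assuming PERF_FORMAT_ID is also set, each entry
 * looks like this:
 */
struct sketch_read_format_group_entry {
	u64	value;	/* atomic64_read(&counter->count) */
	u64	id;	/* primary_counter_id(counter) */
};
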
2810static void perf_output_read(struct perf_output_handle *handle,
2811 struct perf_counter *counter)
2812{
2813 if (counter->attr.read_format & PERF_FORMAT_GROUP)
2814 perf_output_read_group(handle, counter);
2815 else
2816 perf_output_read_one(handle, counter);
2817}
2818
Ingo Molnar28402972009-08-13 10:13:22 +02002819void perf_counter_output(struct perf_counter *counter, int nmi,
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002820 struct perf_sample_data *data)
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002821{
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002822 int ret;
Peter Zijlstra0d486962009-06-02 19:22:16 +02002823 u64 sample_type = counter->attr.sample_type;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002824 struct perf_output_handle handle;
2825 struct perf_event_header header;
2826 u64 ip;
Peter Zijlstra5c148192009-03-25 12:30:23 +01002827 struct {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002828 u32 pid, tid;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002829 } tid_entry;
Peter Zijlstra394ee072009-03-30 19:07:14 +02002830 struct perf_callchain_entry *callchain = NULL;
2831 int callchain_size = 0;
Peter Zijlstra339f7c92009-04-06 11:45:06 +02002832 u64 time;
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002833 struct {
2834 u32 cpu, reserved;
2835 } cpu_entry;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002836
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002837 header.type = PERF_EVENT_SAMPLE;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002838 header.size = sizeof(header);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002839
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002840 header.misc = 0;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002841 header.misc |= perf_misc_flags(data->regs);
Peter Zijlstra6fab0192009-04-08 15:01:26 +02002842
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002843 if (sample_type & PERF_SAMPLE_IP) {
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002844 ip = perf_instruction_pointer(data->regs);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002845 header.size += sizeof(ip);
2846 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002847
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002848 if (sample_type & PERF_SAMPLE_TID) {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002849 /* namespace issues */
Peter Zijlstra709e50c2009-06-02 14:13:15 +02002850 tid_entry.pid = perf_counter_pid(counter, current);
2851 tid_entry.tid = perf_counter_tid(counter, current);
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002852
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002853 header.size += sizeof(tid_entry);
2854 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002855
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002856 if (sample_type & PERF_SAMPLE_TIME) {
Peter Zijlstra4d855452009-04-08 15:01:32 +02002857 /*
2858 * Maybe do better on x86 and provide cpu_clock_nmi()
2859 */
2860 time = sched_clock();
2861
Peter Zijlstra4d855452009-04-08 15:01:32 +02002862 header.size += sizeof(u64);
2863 }
2864
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002865 if (sample_type & PERF_SAMPLE_ADDR)
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002866 header.size += sizeof(u64);
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002867
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002868 if (sample_type & PERF_SAMPLE_ID)
Peter Zijlstraa85f61a2009-05-08 18:52:23 +02002869 header.size += sizeof(u64);
Peter Zijlstraa85f61a2009-05-08 18:52:23 +02002870
Peter Zijlstra7f453c22009-07-21 13:19:40 +02002871 if (sample_type & PERF_SAMPLE_STREAM_ID)
2872 header.size += sizeof(u64);
2873
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002874 if (sample_type & PERF_SAMPLE_CPU) {
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002875 header.size += sizeof(cpu_entry);
2876
2877 cpu_entry.cpu = raw_smp_processor_id();
Arjan van de Ven0dc3d522009-07-21 00:55:05 -07002878 cpu_entry.reserved = 0;
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002879 }
2880
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002881 if (sample_type & PERF_SAMPLE_PERIOD)
Peter Zijlstra689802b2009-06-05 15:05:43 +02002882 header.size += sizeof(u64);
Peter Zijlstra689802b2009-06-05 15:05:43 +02002883
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002884 if (sample_type & PERF_SAMPLE_READ)
2885 header.size += perf_counter_read_size(counter);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002886
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002887 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002888 callchain = perf_callchain(data->regs);
Peter Zijlstra394ee072009-03-30 19:07:14 +02002889
2890 if (callchain) {
Peter Zijlstra9c03d882009-04-06 11:45:00 +02002891 callchain_size = (1 + callchain->nr) * sizeof(u64);
Peter Zijlstra394ee072009-03-30 19:07:14 +02002892 header.size += callchain_size;
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002893 } else
2894 header.size += sizeof(u64);
Peter Zijlstra394ee072009-03-30 19:07:14 +02002895 }
2896
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02002897 if (sample_type & PERF_SAMPLE_RAW) {
Peter Zijlstraa0445602009-08-10 11:16:52 +02002898 int size = sizeof(u32);
2899
2900 if (data->raw)
2901 size += data->raw->size;
2902 else
2903 size += sizeof(u32);
2904
2905 WARN_ON_ONCE(size & (sizeof(u64)-1));
2906 header.size += size;
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002907 }
2908
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002909 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002910 if (ret)
2911 return;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002912
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002913 perf_output_put(&handle, header);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002914
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002915 if (sample_type & PERF_SAMPLE_IP)
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002916 perf_output_put(&handle, ip);
2917
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002918 if (sample_type & PERF_SAMPLE_TID)
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002919 perf_output_put(&handle, tid_entry);
2920
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002921 if (sample_type & PERF_SAMPLE_TIME)
Peter Zijlstra4d855452009-04-08 15:01:32 +02002922 perf_output_put(&handle, time);
2923
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002924 if (sample_type & PERF_SAMPLE_ADDR)
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002925 perf_output_put(&handle, data->addr);
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002926
Peter Zijlstra7f453c22009-07-21 13:19:40 +02002927 if (sample_type & PERF_SAMPLE_ID) {
2928 u64 id = primary_counter_id(counter);
2929
2930 perf_output_put(&handle, id);
2931 }
2932
2933 if (sample_type & PERF_SAMPLE_STREAM_ID)
Peter Zijlstraac4bcf82009-06-05 14:44:52 +02002934 perf_output_put(&handle, counter->id);
Peter Zijlstraa85f61a2009-05-08 18:52:23 +02002935
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002936 if (sample_type & PERF_SAMPLE_CPU)
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002937 perf_output_put(&handle, cpu_entry);
2938
Peter Zijlstra689802b2009-06-05 15:05:43 +02002939 if (sample_type & PERF_SAMPLE_PERIOD)
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002940 perf_output_put(&handle, data->period);
Peter Zijlstra689802b2009-06-05 15:05:43 +02002941
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002942 if (sample_type & PERF_SAMPLE_READ)
2943 perf_output_read(&handle, counter);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002944
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002945 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2946 if (callchain)
2947 perf_output_copy(&handle, callchain, callchain_size);
2948 else {
2949 u64 nr = 0;
2950 perf_output_put(&handle, nr);
2951 }
2952 }
Peter Zijlstra394ee072009-03-30 19:07:14 +02002953
Peter Zijlstraa0445602009-08-10 11:16:52 +02002954 if (sample_type & PERF_SAMPLE_RAW) {
2955 if (data->raw) {
2956 perf_output_put(&handle, data->raw->size);
2957 perf_output_copy(&handle, data->raw->data, data->raw->size);
2958 } else {
2959 struct {
2960 u32 size;
2961 u32 data;
2962 } raw = {
2963 .size = sizeof(u32),
2964 .data = 0,
2965 };
2966 perf_output_put(&handle, raw);
2967 }
2968 }
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002969
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002970 perf_output_end(&handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002971}
2972
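/*
 * Illustrative sketch (not part of the original source): the fields of
 * a PERF_EVENT_SAMPLE record appear in the output buffer in exactly the
 * order they are sized and then written above.  Every field is optional
 * and controlled by a bit in attr.sample_type:
 *
 *	struct perf_event_header header;
 *	u64 ip;				// PERF_SAMPLE_IP
 *	u32 pid, tid;			// PERF_SAMPLE_TID
 *	u64 time;			// PERF_SAMPLE_TIME
 *	u64 addr;			// PERF_SAMPLE_ADDR
 *	u64 id;				// PERF_SAMPLE_ID (primary id)
 *	u64 stream_id;			// PERF_SAMPLE_STREAM_ID (counter->id)
 *	u32 cpu, res;			// PERF_SAMPLE_CPU
 *	u64 period;			// PERF_SAMPLE_PERIOD
 *	read_format values;		// PERF_SAMPLE_READ
 *	u64 nr; u64 ips[nr];		// PERF_SAMPLE_CALLCHAIN
 *	u32 size; char data[size];	// PERF_SAMPLE_RAW (u64 aligned)
 */
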
Peter Zijlstra0322cd62009-03-19 20:26:19 +01002973/*
Peter Zijlstra38b200d2009-06-23 20:13:11 +02002974 * read event
2975 */
2976
2977struct perf_read_event {
2978 struct perf_event_header header;
2979
2980 u32 pid;
2981 u32 tid;
Peter Zijlstra38b200d2009-06-23 20:13:11 +02002982};
2983
2984static void
2985perf_counter_read_event(struct perf_counter *counter,
2986 struct task_struct *task)
2987{
2988 struct perf_output_handle handle;
2989 struct perf_read_event event = {
2990 .header = {
2991 .type = PERF_EVENT_READ,
2992 .misc = 0,
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002993 .size = sizeof(event) + perf_counter_read_size(counter),
Peter Zijlstra38b200d2009-06-23 20:13:11 +02002994 },
2995 .pid = perf_counter_pid(counter, task),
2996 .tid = perf_counter_tid(counter, task),
Peter Zijlstra38b200d2009-06-23 20:13:11 +02002997 };
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002998 int ret;
Peter Zijlstra38b200d2009-06-23 20:13:11 +02002999
3000 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
3001 if (ret)
3002 return;
3003
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003004 perf_output_put(&handle, event);
3005 perf_output_read(&handle, counter);
3006
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003007 perf_output_end(&handle);
3008}
3009
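/*
 * Illustrative sketch (not part of the original source): the resulting
 * PERF_EVENT_READ record is the fixed part filled in above followed by
 * the same read_format payload that perf_output_read() emits:
 *
 *	struct perf_event_header header;
 *	u32 pid, tid;
 *	read_format values;
 */
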
3010/*
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003011 * task tracking -- fork/exit
3012 *
3013 * enabled by: attr.comm | attr.mmap | attr.task
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003014 */
3015
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003016struct perf_task_event {
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003017 struct task_struct *task;
3018 struct perf_counter_context *task_ctx;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003019
3020 struct {
3021 struct perf_event_header header;
3022
3023 u32 pid;
3024 u32 ppid;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003025 u32 tid;
3026 u32 ptid;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003027 } event;
3028};
3029
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003030static void perf_counter_task_output(struct perf_counter *counter,
3031 struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003032{
3033 struct perf_output_handle handle;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003034 int size = task_event->event.header.size;
3035 struct task_struct *task = task_event->task;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003036 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3037
3038 if (ret)
3039 return;
3040
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003041 task_event->event.pid = perf_counter_pid(counter, task);
Peter Zijlstra94d5d1b2009-08-13 16:14:42 +02003042 task_event->event.ppid = perf_counter_pid(counter, current);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003043
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003044 task_event->event.tid = perf_counter_tid(counter, task);
Peter Zijlstra94d5d1b2009-08-13 16:14:42 +02003045 task_event->event.ptid = perf_counter_tid(counter, current);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003046
3047 perf_output_put(&handle, task_event->event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003048 perf_output_end(&handle);
3049}
3050
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003051static int perf_counter_task_match(struct perf_counter *counter)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003052{
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003053 if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003054 return 1;
3055
3056 return 0;
3057}
3058
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003059static void perf_counter_task_ctx(struct perf_counter_context *ctx,
3060 struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003061{
3062 struct perf_counter *counter;
3063
3064 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3065 return;
3066
3067 rcu_read_lock();
3068 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003069 if (perf_counter_task_match(counter))
3070 perf_counter_task_output(counter, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003071 }
3072 rcu_read_unlock();
3073}
3074
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003075static void perf_counter_task_event(struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003076{
3077 struct perf_cpu_context *cpuctx;
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003078 struct perf_counter_context *ctx = task_event->task_ctx;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003079
3080 cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003081 perf_counter_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003082 put_cpu_var(perf_cpu_context);
3083
3084 rcu_read_lock();
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003085 if (!ctx)
3086 ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003087 if (ctx)
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003088 perf_counter_task_ctx(ctx, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003089 rcu_read_unlock();
3090}
3091
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003092static void perf_counter_task(struct task_struct *task,
3093 struct perf_counter_context *task_ctx,
3094 int new)
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003095{
3096 struct perf_task_event task_event;
3097
3098 if (!atomic_read(&nr_comm_counters) &&
3099 !atomic_read(&nr_mmap_counters) &&
3100 !atomic_read(&nr_task_counters))
3101 return;
3102
3103 task_event = (struct perf_task_event){
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003104 .task = task,
3105 .task_ctx = task_ctx,
3106 .event = {
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003107 .header = {
3108 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
3109 .misc = 0,
3110 .size = sizeof(task_event.event),
3111 },
3112 /* .pid */
3113 /* .ppid */
3114 /* .tid */
3115 /* .ptid */
3116 },
3117 };
3118
3119 perf_counter_task_event(&task_event);
3120}
3121
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003122void perf_counter_fork(struct task_struct *task)
3123{
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003124 perf_counter_task(task, NULL, 1);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003125}
3126
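/*
 * Sketch of the two call sites; the exit-side caller is outside this
 * excerpt, so its exact shape is an assumption.  Fork reports with
 * new == 1 and task exit is expected to report with new == 0, which
 * selects PERF_EVENT_FORK vs PERF_EVENT_EXIT above:
 *
 *	perf_counter_task(task, NULL, 1);	// from perf_counter_fork()
 *	perf_counter_task(task, child_ctx, 0);	// assumed exit-time caller
 */
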
3127/*
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003128 * comm tracking
3129 */
3130
3131struct perf_comm_event {
Ingo Molnar22a4f652009-06-01 10:13:37 +02003132 struct task_struct *task;
3133 char *comm;
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003134 int comm_size;
3135
3136 struct {
3137 struct perf_event_header header;
3138
3139 u32 pid;
3140 u32 tid;
3141 } event;
3142};
3143
3144static void perf_counter_comm_output(struct perf_counter *counter,
3145 struct perf_comm_event *comm_event)
3146{
3147 struct perf_output_handle handle;
3148 int size = comm_event->event.header.size;
3149 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3150
3151 if (ret)
3152 return;
3153
Peter Zijlstra709e50c2009-06-02 14:13:15 +02003154 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
3155 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
3156
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003157 perf_output_put(&handle, comm_event->event);
3158 perf_output_copy(&handle, comm_event->comm,
3159 comm_event->comm_size);
3160 perf_output_end(&handle);
3161}
3162
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003163static int perf_counter_comm_match(struct perf_counter *counter)
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003164{
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003165 if (counter->attr.comm)
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003166 return 1;
3167
3168 return 0;
3169}
3170
3171static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
3172 struct perf_comm_event *comm_event)
3173{
3174 struct perf_counter *counter;
3175
3176 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3177 return;
3178
3179 rcu_read_lock();
3180 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003181 if (perf_counter_comm_match(counter))
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003182 perf_counter_comm_output(counter, comm_event);
3183 }
3184 rcu_read_unlock();
3185}
3186
3187static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3188{
3189 struct perf_cpu_context *cpuctx;
Peter Zijlstra665c2142009-05-29 14:51:57 +02003190 struct perf_counter_context *ctx;
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003191 unsigned int size;
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003192 char comm[TASK_COMM_LEN];
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003193
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003194 memset(comm, 0, sizeof(comm));
3195 strncpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnar888fcee2009-04-09 09:48:22 +02003196 size = ALIGN(strlen(comm)+1, sizeof(u64));
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003197
3198 comm_event->comm = comm;
3199 comm_event->comm_size = size;
3200
3201 comm_event->event.header.size = sizeof(comm_event->event) + size;
3202
3203 cpuctx = &get_cpu_var(perf_cpu_context);
3204 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
3205 put_cpu_var(perf_cpu_context);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003206
3207 rcu_read_lock();
3208 /*
3209 * doesn't really matter which of the child contexts the
3210	 * event ends up in.
3211 */
3212 ctx = rcu_dereference(current->perf_counter_ctxp);
3213 if (ctx)
3214 perf_counter_comm_ctx(ctx, comm_event);
3215 rcu_read_unlock();
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003216}
3217
3218void perf_counter_comm(struct task_struct *task)
3219{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003220 struct perf_comm_event comm_event;
3221
Paul Mackerras57e79862009-06-30 16:07:19 +10003222 if (task->perf_counter_ctxp)
3223 perf_counter_enable_on_exec(task);
3224
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003225 if (!atomic_read(&nr_comm_counters))
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003226 return;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10003227
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003228 comm_event = (struct perf_comm_event){
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003229 .task = task,
Peter Zijlstra573402d2009-07-22 11:13:50 +02003230 /* .comm */
3231 /* .comm_size */
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003232 .event = {
Peter Zijlstra573402d2009-07-22 11:13:50 +02003233 .header = {
3234 .type = PERF_EVENT_COMM,
3235 .misc = 0,
3236 /* .size */
3237 },
3238 /* .pid */
3239 /* .tid */
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003240 },
3241 };
3242
3243 perf_counter_comm_event(&comm_event);
3244}
3245
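/*
 * Illustrative sketch (not part of the original source): the emitted
 * PERF_EVENT_COMM record is the fixed header/pid/tid part followed by
 * the task name, zero-padded to a multiple of 8 bytes by the ALIGN()
 * in perf_counter_comm_event():
 *
 *	struct perf_event_header header;
 *	u32 pid, tid;
 *	char comm[];		// strlen()+1 rounded up to a u64 boundary
 */
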
3246/*
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003247 * mmap tracking
3248 */
3249
3250struct perf_mmap_event {
Peter Zijlstra089dd792009-06-05 14:04:55 +02003251 struct vm_area_struct *vma;
3252
3253 const char *file_name;
3254 int file_size;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003255
3256 struct {
3257 struct perf_event_header header;
3258
3259 u32 pid;
3260 u32 tid;
3261 u64 start;
3262 u64 len;
3263 u64 pgoff;
3264 } event;
3265};
3266
3267static void perf_counter_mmap_output(struct perf_counter *counter,
3268 struct perf_mmap_event *mmap_event)
3269{
3270 struct perf_output_handle handle;
3271 int size = mmap_event->event.header.size;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003272 int ret = perf_output_begin(&handle, counter, size, 0, 0);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003273
3274 if (ret)
3275 return;
3276
Peter Zijlstra709e50c2009-06-02 14:13:15 +02003277 mmap_event->event.pid = perf_counter_pid(counter, current);
3278 mmap_event->event.tid = perf_counter_tid(counter, current);
3279
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003280 perf_output_put(&handle, mmap_event->event);
3281 perf_output_copy(&handle, mmap_event->file_name,
3282 mmap_event->file_size);
Peter Zijlstra78d613e2009-03-30 19:07:11 +02003283 perf_output_end(&handle);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003284}
3285
3286static int perf_counter_mmap_match(struct perf_counter *counter,
3287 struct perf_mmap_event *mmap_event)
3288{
Peter Zijlstrad99e9442009-06-04 17:08:58 +02003289 if (counter->attr.mmap)
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003290 return 1;
3291
3292 return 0;
3293}
3294
3295static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
3296 struct perf_mmap_event *mmap_event)
3297{
3298 struct perf_counter *counter;
3299
3300 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3301 return;
3302
3303 rcu_read_lock();
3304 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3305 if (perf_counter_mmap_match(counter, mmap_event))
3306 perf_counter_mmap_output(counter, mmap_event);
3307 }
3308 rcu_read_unlock();
3309}
3310
3311static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3312{
3313 struct perf_cpu_context *cpuctx;
Peter Zijlstra665c2142009-05-29 14:51:57 +02003314 struct perf_counter_context *ctx;
Peter Zijlstra089dd792009-06-05 14:04:55 +02003315 struct vm_area_struct *vma = mmap_event->vma;
3316 struct file *file = vma->vm_file;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003317 unsigned int size;
3318 char tmp[16];
3319 char *buf = NULL;
Peter Zijlstra089dd792009-06-05 14:04:55 +02003320 const char *name;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003321
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003322 memset(tmp, 0, sizeof(tmp));
3323
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003324 if (file) {
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003325 /*
3326 * d_path works from the end of the buffer backwards, so we
3327 * need to add enough zero bytes after the string to handle
3328 * the 64bit alignment we do later.
3329 */
3330 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003331 if (!buf) {
3332 name = strncpy(tmp, "//enomem", sizeof(tmp));
3333 goto got_name;
3334 }
Peter Zijlstrad3d21c42009-04-09 10:53:46 +02003335 name = d_path(&file->f_path, buf, PATH_MAX);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003336 if (IS_ERR(name)) {
3337 name = strncpy(tmp, "//toolong", sizeof(tmp));
3338 goto got_name;
3339 }
3340 } else {
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003341 if (arch_vma_name(mmap_event->vma)) {
3342 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3343 sizeof(tmp));
Peter Zijlstra089dd792009-06-05 14:04:55 +02003344 goto got_name;
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003345 }
Peter Zijlstra089dd792009-06-05 14:04:55 +02003346
3347 if (!vma->vm_mm) {
3348 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3349 goto got_name;
3350 }
3351
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003352 name = strncpy(tmp, "//anon", sizeof(tmp));
3353 goto got_name;
3354 }
3355
3356got_name:
Ingo Molnar888fcee2009-04-09 09:48:22 +02003357 size = ALIGN(strlen(name)+1, sizeof(u64));
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003358
3359 mmap_event->file_name = name;
3360 mmap_event->file_size = size;
3361
3362 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
3363
3364 cpuctx = &get_cpu_var(perf_cpu_context);
3365 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
3366 put_cpu_var(perf_cpu_context);
3367
Peter Zijlstra665c2142009-05-29 14:51:57 +02003368 rcu_read_lock();
3369 /*
3370 * doesn't really matter which of the child contexts the
3371	 * event ends up in.
3372 */
3373 ctx = rcu_dereference(current->perf_counter_ctxp);
3374 if (ctx)
3375 perf_counter_mmap_ctx(ctx, mmap_event);
3376 rcu_read_unlock();
3377
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003378 kfree(buf);
3379}
3380
Peter Zijlstra089dd792009-06-05 14:04:55 +02003381void __perf_counter_mmap(struct vm_area_struct *vma)
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003382{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003383 struct perf_mmap_event mmap_event;
3384
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003385 if (!atomic_read(&nr_mmap_counters))
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003386 return;
3387
3388 mmap_event = (struct perf_mmap_event){
Peter Zijlstra089dd792009-06-05 14:04:55 +02003389 .vma = vma,
Peter Zijlstra573402d2009-07-22 11:13:50 +02003390 /* .file_name */
3391 /* .file_size */
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003392 .event = {
Peter Zijlstra573402d2009-07-22 11:13:50 +02003393 .header = {
3394 .type = PERF_EVENT_MMAP,
3395 .misc = 0,
3396 /* .size */
3397 },
3398 /* .pid */
3399 /* .tid */
Peter Zijlstra089dd792009-06-05 14:04:55 +02003400 .start = vma->vm_start,
3401 .len = vma->vm_end - vma->vm_start,
3402 .pgoff = vma->vm_pgoff,
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003403 },
3404 };
3405
3406 perf_counter_mmap_event(&mmap_event);
3407}
3408
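/*
 * Illustrative sketch (not part of the original source): the emitted
 * PERF_EVENT_MMAP record carries the mapping geometry plus the file
 * name (or one of the "[vdso]", "//anon", "//enomem", "//toolong"
 * markers chosen above), padded to a u64 boundary:
 *
 *	struct perf_event_header header;
 *	u32 pid, tid;
 *	u64 start, len, pgoff;
 *	char filename[];
 */
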
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003409/*
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003410 * IRQ throttle logging
3411 */
3412
3413static void perf_log_throttle(struct perf_counter *counter, int enable)
3414{
3415 struct perf_output_handle handle;
3416 int ret;
3417
3418 struct {
3419 struct perf_event_header header;
3420 u64 time;
Peter Zijlstracca3f452009-06-11 14:57:55 +02003421 u64 id;
Peter Zijlstra7f453c22009-07-21 13:19:40 +02003422 u64 stream_id;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003423 } throttle_event = {
3424 .header = {
Anton Blanchard966ee4d2009-07-22 23:05:46 +10003425 .type = PERF_EVENT_THROTTLE,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003426 .misc = 0,
3427 .size = sizeof(throttle_event),
3428 },
Peter Zijlstra7f453c22009-07-21 13:19:40 +02003429 .time = sched_clock(),
3430 .id = primary_counter_id(counter),
3431 .stream_id = counter->id,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003432 };
3433
Anton Blanchard966ee4d2009-07-22 23:05:46 +10003434 if (enable)
3435 throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
3436
Ingo Molnar0127c3e2009-05-25 22:03:26 +02003437 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003438 if (ret)
3439 return;
3440
3441 perf_output_put(&handle, throttle_event);
3442 perf_output_end(&handle);
3443}
3444
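/*
 * Illustrative sketch (not part of the original source): throttle
 * records are fixed size; 'id' is the primary (inherited) counter id
 * and 'stream_id' the per-instance id, mirroring the PERF_SAMPLE_ID
 * vs PERF_SAMPLE_STREAM_ID distinction in sample records:
 *
 *	struct perf_event_header header;	// PERF_EVENT_(UN)THROTTLE
 *	u64 time;				// sched_clock()
 *	u64 id;					// primary_counter_id(counter)
 *	u64 stream_id;				// counter->id
 */
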
3445/*
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01003446 * Generic counter overflow handling, sampling.
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003447 */
3448
Peter Zijlstradf1a1322009-06-10 21:02:22 +02003449int perf_counter_overflow(struct perf_counter *counter, int nmi,
3450 struct perf_sample_data *data)
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003451{
Peter Zijlstra79f14642009-04-06 11:45:07 +02003452 int events = atomic_read(&counter->event_limit);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003453 int throttle = counter->pmu->unthrottle != NULL;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003454 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003455 int ret = 0;
3456
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003457 if (!throttle) {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003458 hwc->interrupts++;
Ingo Molnar128f0482009-06-03 22:19:36 +02003459 } else {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003460 if (hwc->interrupts != MAX_INTERRUPTS) {
3461 hwc->interrupts++;
Peter Zijlstradf58ab22009-06-11 11:25:05 +02003462 if (HZ * hwc->interrupts >
3463 (u64)sysctl_perf_counter_sample_rate) {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003464 hwc->interrupts = MAX_INTERRUPTS;
Ingo Molnar128f0482009-06-03 22:19:36 +02003465 perf_log_throttle(counter, 0);
3466 ret = 1;
3467 }
3468 } else {
3469 /*
3470			 * Keep re-disabling the counter even though we
3471			 * disabled it on the previous pass - just in case we raced with a
3472 * sched-in and the counter got enabled again:
3473 */
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003474 ret = 1;
3475 }
3476 }
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003477
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003478 if (counter->attr.freq) {
3479 u64 now = sched_clock();
3480 s64 delta = now - hwc->freq_stamp;
3481
3482 hwc->freq_stamp = now;
3483
3484 if (delta > 0 && delta < TICK_NSEC)
3485 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3486 }
3487
Peter Zijlstra2023b352009-05-05 17:50:26 +02003488 /*
3489 * XXX event_limit might not quite work as expected on inherited
3490 * counters
3491 */
3492
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003493 counter->pending_kill = POLL_IN;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003494 if (events && atomic_dec_and_test(&counter->event_limit)) {
3495 ret = 1;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003496 counter->pending_kill = POLL_HUP;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003497 if (nmi) {
3498 counter->pending_disable = 1;
3499 perf_pending_queue(&counter->pending,
3500 perf_pending_counter);
3501 } else
3502 perf_counter_disable(counter);
3503 }
3504
Peter Zijlstradf1a1322009-06-10 21:02:22 +02003505 perf_counter_output(counter, nmi, data);
Peter Zijlstra79f14642009-04-06 11:45:07 +02003506 return ret;
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003507}
3508
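/*
 * Worked example of the throttle test above (HZ and the sysctl value
 * are assumptions for illustration): with HZ == 1000 and
 * sysctl_perf_counter_sample_rate == 100000, "HZ * hwc->interrupts"
 * exceeds the sample rate once a counter has taken more than
 * 100000 / 1000 == 100 interrupts since the last tick; it is then
 * marked MAX_INTERRUPTS, a PERF_EVENT_THROTTLE record is logged and
 * the counter stays throttled until something unthrottles it.
 */
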
3509/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003510 * Generic software counter infrastructure
3511 */
3512
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003513/*
3514 * We directly increment counter->count and keep a second value in
3515 * counter->hw.period_left to count intervals. This period counter
3516 * is kept in the range [-sample_period, 0] so that we can use the
3517 * sign as trigger.
3518 */
3519
3520static u64 perf_swcounter_set_period(struct perf_counter *counter)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003521{
3522 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003523 u64 period = hwc->last_period;
3524 u64 nr, offset;
3525 s64 old, val;
3526
3527 hwc->last_period = hwc->sample_period;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003528
3529again:
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003530 old = val = atomic64_read(&hwc->period_left);
3531 if (val < 0)
3532 return 0;
3533
3534 nr = div64_u64(period + val, period);
3535 offset = nr * period;
3536 val -= offset;
3537 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003538 goto again;
3539
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003540 return nr;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003541}
3542
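/*
 * Worked example of the period arithmetic above (numbers made up for
 * illustration): with last_period == 100 and period_left == 30, i.e.
 * the count ran 30 events past zero, nr = (100 + 30) / 100 = 1
 * overflow is reported and period_left is reset to 30 - 100 = -70,
 * so 70 more events arm the next overflow.  With period_left == 230
 * the same formula yields nr = 3 and period_left = 230 - 300 = -70.
 */
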
3543static void perf_swcounter_overflow(struct perf_counter *counter,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003544 int nmi, struct perf_sample_data *data)
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003545{
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003546 struct hw_perf_counter *hwc = &counter->hw;
3547 u64 overflow;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02003548
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003549 data->period = counter->hw.last_period;
3550 overflow = perf_swcounter_set_period(counter);
3551
3552 if (hwc->interrupts == MAX_INTERRUPTS)
3553 return;
3554
3555 for (; overflow; overflow--) {
3556 if (perf_counter_overflow(counter, nmi, data)) {
3557 /*
3558 * We inhibit the overflow from happening when
3559 * hwc->interrupts == MAX_INTERRUPTS.
3560 */
3561 break;
3562 }
3563 }
3564}
3565
3566static void perf_swcounter_unthrottle(struct perf_counter *counter)
3567{
3568 /*
3569	 * Nothing to do; we already reset hwc->interrupts.
3570 */
3571}
3572
3573static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3574 int nmi, struct perf_sample_data *data)
3575{
3576 struct hw_perf_counter *hwc = &counter->hw;
3577
3578 atomic64_add(nr, &counter->count);
3579
3580 if (!hwc->sample_period)
3581 return;
3582
3583 if (!data->regs)
3584 return;
3585
3586 if (!atomic64_add_negative(nr, &hwc->period_left))
3587 perf_swcounter_overflow(counter, nmi, data);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003588}
3589
Paul Mackerras880ca152009-06-01 17:49:14 +10003590static int perf_swcounter_is_counting(struct perf_counter *counter)
3591{
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003592 /*
3593 * The counter is active, we're good!
3594 */
Paul Mackerras880ca152009-06-01 17:49:14 +10003595 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3596 return 1;
3597
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003598 /*
3599 * The counter is off/error, not counting.
3600 */
Paul Mackerras880ca152009-06-01 17:49:14 +10003601 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3602 return 0;
3603
3604 /*
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003605	 * The counter is inactive; if the context is active we're
3606	 * part of a group that didn't make it onto the 'pmu',
3607	 * so it is not counting.
Paul Mackerras880ca152009-06-01 17:49:14 +10003608 */
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003609 if (counter->ctx->is_active)
3610 return 0;
3611
3612 /*
3613	 * We're inactive and the context is too; this means the
3614	 * task is scheduled out and we're counting events that happen
3615 * to us, like migration events.
3616 */
3617 return 1;
Paul Mackerras880ca152009-06-01 17:49:14 +10003618}
3619
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003620static int perf_swcounter_match(struct perf_counter *counter,
Peter Zijlstra1c432d82009-06-11 13:19:29 +02003621 enum perf_type_id type,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003622 u32 event, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003623{
Paul Mackerras880ca152009-06-01 17:49:14 +10003624 if (!perf_swcounter_is_counting(counter))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003625 return 0;
3626
Ingo Molnara21ca2c2009-06-06 09:58:57 +02003627 if (counter->attr.type != type)
3628 return 0;
3629 if (counter->attr.config != event)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003630 return 0;
3631
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003632 if (regs) {
Peter Zijlstra0d486962009-06-02 19:22:16 +02003633 if (counter->attr.exclude_user && user_mode(regs))
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003634 return 0;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003635
Peter Zijlstra0d486962009-06-02 19:22:16 +02003636 if (counter->attr.exclude_kernel && !user_mode(regs))
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003637 return 0;
3638 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003639
3640 return 1;
3641}
3642
3643static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003644 enum perf_type_id type,
3645 u32 event, u64 nr, int nmi,
3646 struct perf_sample_data *data)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003647{
3648 struct perf_counter *counter;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003649
Peter Zijlstra01ef09d2009-03-19 20:26:11 +01003650 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003651 return;
3652
Peter Zijlstra592903c2009-03-13 12:21:36 +01003653 rcu_read_lock();
3654 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003655 if (perf_swcounter_match(counter, type, event, data->regs))
3656 perf_swcounter_add(counter, nr, nmi, data);
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003657 }
Peter Zijlstra592903c2009-03-13 12:21:36 +01003658 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003659}
3660
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003661static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3662{
3663 if (in_nmi())
3664 return &cpuctx->recursion[3];
3665
3666 if (in_irq())
3667 return &cpuctx->recursion[2];
3668
3669 if (in_softirq())
3670 return &cpuctx->recursion[1];
3671
3672 return &cpuctx->recursion[0];
3673}
3674
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003675static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3676 u64 nr, int nmi,
3677 struct perf_sample_data *data)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003678{
3679 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003680 int *recursion = perf_swcounter_recursion_context(cpuctx);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003681 struct perf_counter_context *ctx;
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003682
3683 if (*recursion)
3684 goto out;
3685
3686 (*recursion)++;
3687 barrier();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003688
Peter Zijlstra78f13e92009-04-08 15:01:33 +02003689 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003690 nr, nmi, data);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003691 rcu_read_lock();
3692 /*
3693 * doesn't really matter which of the child contexts the
3694	 * events end up in.
3695 */
3696 ctx = rcu_dereference(current->perf_counter_ctxp);
3697 if (ctx)
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003698 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003699 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003700
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003701 barrier();
3702 (*recursion)--;
3703
3704out:
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003705 put_cpu_var(perf_cpu_context);
3706}
3707
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003708void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3709 struct pt_regs *regs, u64 addr)
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003710{
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003711 struct perf_sample_data data = {
3712 .regs = regs,
3713 .addr = addr,
3714 };
3715
3716 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003717}
3718
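/*
 * Sketch of an in-kernel call site (assumed; the real callers live
 * outside this excerpt): a software event such as a context switch
 * might plausibly be reported with nmi == 0 and an unused address,
 * e.g.
 *
 *	__perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0,
 *			       task_pt_regs(current), 0);
 *
 * which wraps the value/regs/addr into a perf_sample_data and feeds
 * every matching software counter in the CPU and task contexts.
 */
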
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003719static void perf_swcounter_read(struct perf_counter *counter)
3720{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003721}
3722
3723static int perf_swcounter_enable(struct perf_counter *counter)
3724{
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003725 struct hw_perf_counter *hwc = &counter->hw;
3726
3727 if (hwc->sample_period) {
3728 hwc->last_period = hwc->sample_period;
3729 perf_swcounter_set_period(counter);
3730 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003731 return 0;
3732}
3733
3734static void perf_swcounter_disable(struct perf_counter *counter)
3735{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003736}
3737
Robert Richter4aeb0b42009-04-29 12:47:03 +02003738static const struct pmu perf_ops_generic = {
Peter Zijlstraac17dc82009-03-13 12:21:34 +01003739 .enable = perf_swcounter_enable,
3740 .disable = perf_swcounter_disable,
3741 .read = perf_swcounter_read,
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003742 .unthrottle = perf_swcounter_unthrottle,
Peter Zijlstraac17dc82009-03-13 12:21:34 +01003743};
3744
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003745/*
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003746 * hrtimer based swcounter callback
3747 */
3748
3749static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3750{
3751 enum hrtimer_restart ret = HRTIMER_RESTART;
3752 struct perf_sample_data data;
3753 struct perf_counter *counter;
3754 u64 period;
3755
3756 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3757 counter->pmu->read(counter);
3758
3759 data.addr = 0;
3760 data.regs = get_irq_regs();
3761 /*
3762 * In case we exclude kernel IPs or are somehow not in interrupt
3763 * context, provide the next best thing, the user IP.
3764 */
3765 if ((counter->attr.exclude_kernel || !data.regs) &&
3766 !counter->attr.exclude_user)
3767 data.regs = task_pt_regs(current);
3768
3769 if (data.regs) {
3770 if (perf_counter_overflow(counter, 0, &data))
3771 ret = HRTIMER_NORESTART;
3772 }
3773
3774 period = max_t(u64, 10000, counter->hw.sample_period);
3775 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3776
3777 return ret;
3778}
3779
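/*
 * Worked note on the clamp above: the period handed to the hrtimer is
 * never below 10000ns, so hrtimer-driven software counters fire at
 * most 1e9 / 10000 == 100000 times per second per counter, regardless
 * of how small a sample_period the user asked for.
 */
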
3780/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003781 * Software counter: cpu wall time clock
3782 */
3783
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003784static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3785{
3786 int cpu = raw_smp_processor_id();
3787 s64 prev;
3788 u64 now;
3789
3790 now = cpu_clock(cpu);
3791 prev = atomic64_read(&counter->hw.prev_count);
3792 atomic64_set(&counter->hw.prev_count, now);
3793 atomic64_add(now - prev, &counter->count);
3794}
3795
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003796static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3797{
3798 struct hw_perf_counter *hwc = &counter->hw;
3799 int cpu = raw_smp_processor_id();
3800
3801 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01003802 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3803 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003804 if (hwc->sample_period) {
3805 u64 period = max_t(u64, 10000, hwc->sample_period);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003806 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003807 ns_to_ktime(period), 0,
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003808 HRTIMER_MODE_REL, 0);
3809 }
3810
3811 return 0;
3812}
3813
Ingo Molnar5c92d122008-12-11 13:21:10 +01003814static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3815{
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003816 if (counter->hw.sample_period)
Peter Zijlstrab986d7e2009-05-20 12:21:21 +02003817 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003818 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01003819}
3820
3821static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3822{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003823 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01003824}
3825
Robert Richter4aeb0b42009-04-29 12:47:03 +02003826static const struct pmu perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01003827 .enable = cpu_clock_perf_counter_enable,
3828 .disable = cpu_clock_perf_counter_disable,
3829 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01003830};
3831
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01003832/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003833 * Software counter: task time clock
3834 */
3835
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003836static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
Ingo Molnarbae43c92008-12-11 14:03:20 +01003837{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003838 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003839 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01003840
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003841 prev = atomic64_xchg(&counter->hw.prev_count, now);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003842 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003843 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01003844}
3845
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01003846static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003847{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003848 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003849 u64 now;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003850
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003851 now = counter->ctx->time;
3852
3853 atomic64_set(&hwc->prev_count, now);
Peter Zijlstra039fc912009-03-13 16:43:47 +01003854 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3855 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003856 if (hwc->sample_period) {
3857 u64 period = max_t(u64, 10000, hwc->sample_period);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003858 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003859 ns_to_ktime(period), 0,
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003860 HRTIMER_MODE_REL, 0);
3861 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01003862
3863 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003864}
3865
3866static void task_clock_perf_counter_disable(struct perf_counter *counter)
3867{
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003868 if (counter->hw.sample_period)
Peter Zijlstrab986d7e2009-05-20 12:21:21 +02003869 hrtimer_cancel(&counter->hw.hrtimer);
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003870 task_clock_perf_counter_update(counter, counter->ctx->time);
3871
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003872}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01003873
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003874static void task_clock_perf_counter_read(struct perf_counter *counter)
3875{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003876 u64 time;
3877
3878 if (!in_nmi()) {
3879 update_context_time(counter->ctx);
3880 time = counter->ctx->time;
3881 } else {
3882 u64 now = perf_clock();
3883 u64 delta = now - counter->ctx->timestamp;
3884 time = counter->ctx->time + delta;
3885 }
3886
3887 task_clock_perf_counter_update(counter, time);
Ingo Molnarbae43c92008-12-11 14:03:20 +01003888}
3889
Robert Richter4aeb0b42009-04-29 12:47:03 +02003890static const struct pmu perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01003891 .enable = task_clock_perf_counter_enable,
3892 .disable = task_clock_perf_counter_disable,
3893 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01003894};
3895
Peter Zijlstrae077df42009-03-19 20:26:17 +01003896#ifdef CONFIG_EVENT_PROFILE
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003897void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3898 int entry_size)
Peter Zijlstrae077df42009-03-19 20:26:17 +01003899{
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003900 struct perf_raw_record raw = {
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003901 .size = entry_size,
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003902 .data = record,
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003903 };
3904
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003905 struct perf_sample_data data = {
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01003906 .regs = get_irq_regs(),
Peter Zijlstra3a659302009-07-21 17:34:57 +02003907 .addr = addr,
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003908 .raw = &raw,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003909 };
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003910
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003911 if (!data.regs)
3912 data.regs = task_pt_regs(current);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003913
Peter Zijlstra3a659302009-07-21 17:34:57 +02003914 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
Peter Zijlstrae077df42009-03-19 20:26:17 +01003915}
Steven Whitehouseff7b1b42009-04-15 16:55:05 +01003916EXPORT_SYMBOL_GPL(perf_tpcounter_event);
Peter Zijlstrae077df42009-03-19 20:26:17 +01003917
3918extern int ftrace_profile_enable(int);
3919extern void ftrace_profile_disable(int);
3920
3921static void tp_perf_counter_destroy(struct perf_counter *counter)
3922{
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01003923 ftrace_profile_disable(counter->attr.config);
Peter Zijlstrae077df42009-03-19 20:26:17 +01003924}
3925
Robert Richter4aeb0b42009-04-29 12:47:03 +02003926static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01003927{
Peter Zijlstraa4e95fc2009-08-10 11:20:12 +02003928 /*
3929	 * Raw tracepoint data is a severe data leak; only allow root to
3930 * have these.
3931 */
3932 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
3933 !capable(CAP_SYS_ADMIN))
3934 return ERR_PTR(-EPERM);
3935
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01003936 if (ftrace_profile_enable(counter->attr.config))
Peter Zijlstrae077df42009-03-19 20:26:17 +01003937 return NULL;
3938
3939 counter->destroy = tp_perf_counter_destroy;
3940
3941 return &perf_ops_generic;
3942}
3943#else
Robert Richter4aeb0b42009-04-29 12:47:03 +02003944static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01003945{
3946 return NULL;
3947}
3948#endif
3949
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003950atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
3951
3952static void sw_perf_counter_destroy(struct perf_counter *counter)
3953{
3954 u64 event = counter->attr.config;
3955
Peter Zijlstraf3440112009-06-22 13:58:35 +02003956 WARN_ON(counter->parent);
3957
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003958 atomic_dec(&perf_swcounter_enabled[event]);
3959}
3960
Robert Richter4aeb0b42009-04-29 12:47:03 +02003961static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
Ingo Molnar5c92d122008-12-11 13:21:10 +01003962{
Robert Richter4aeb0b42009-04-29 12:47:03 +02003963 const struct pmu *pmu = NULL;
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003964 u64 event = counter->attr.config;
Ingo Molnar5c92d122008-12-11 13:21:10 +01003965
Paul Mackerras0475f9e2009-02-11 14:35:35 +11003966 /*
3967 * Software counters (currently) can't in general distinguish
3968 * between user, kernel and hypervisor events.
3969 * However, context switches and cpu migrations are considered
3970 * to be kernel events, and page faults are never hypervisor
3971 * events.
3972 */
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003973 switch (event) {
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02003974 case PERF_COUNT_SW_CPU_CLOCK:
Robert Richter4aeb0b42009-04-29 12:47:03 +02003975 pmu = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003976
Ingo Molnar5c92d122008-12-11 13:21:10 +01003977 break;
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02003978 case PERF_COUNT_SW_TASK_CLOCK:
Paul Mackerras23a185c2009-02-09 22:42:47 +11003979 /*
3980 * If the user instantiates this as a per-cpu counter,
3981 * use the cpu_clock counter instead.
3982 */
3983 if (counter->ctx->task)
Robert Richter4aeb0b42009-04-29 12:47:03 +02003984 pmu = &perf_ops_task_clock;
Paul Mackerras23a185c2009-02-09 22:42:47 +11003985 else
Robert Richter4aeb0b42009-04-29 12:47:03 +02003986 pmu = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003987
Ingo Molnarbae43c92008-12-11 14:03:20 +01003988 break;
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02003989 case PERF_COUNT_SW_PAGE_FAULTS:
3990 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
3991 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
3992 case PERF_COUNT_SW_CONTEXT_SWITCHES:
3993 case PERF_COUNT_SW_CPU_MIGRATIONS:
Peter Zijlstraf3440112009-06-22 13:58:35 +02003994 if (!counter->parent) {
3995 atomic_inc(&perf_swcounter_enabled[event]);
3996 counter->destroy = sw_perf_counter_destroy;
3997 }
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003998 pmu = &perf_ops_generic;
Ingo Molnar6c594c22008-12-14 12:34:15 +01003999 break;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004000 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01004001
Robert Richter4aeb0b42009-04-29 12:47:03 +02004002 return pmu;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004003}
4004
Thomas Gleixner0793a612008-12-04 20:12:29 +01004005/*
4006 * Allocate and initialize a counter structure
4007 */
4008static struct perf_counter *
Peter Zijlstra0d486962009-06-02 19:22:16 +02004009perf_counter_alloc(struct perf_counter_attr *attr,
Ingo Molnar04289bb2008-12-11 08:38:42 +01004010 int cpu,
Paul Mackerras23a185c2009-02-09 22:42:47 +11004011 struct perf_counter_context *ctx,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004012 struct perf_counter *group_leader,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004013 struct perf_counter *parent_counter,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004014 gfp_t gfpflags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004015{
Robert Richter4aeb0b42009-04-29 12:47:03 +02004016 const struct pmu *pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01004017 struct perf_counter *counter;
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004018 struct hw_perf_counter *hwc;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004019 long err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004020
Ingo Molnar9b51f662008-12-12 13:49:45 +01004021 counter = kzalloc(sizeof(*counter), gfpflags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004022 if (!counter)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004023 return ERR_PTR(-ENOMEM);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004024
Ingo Molnar04289bb2008-12-11 08:38:42 +01004025 /*
4026 * Single counters are their own group leaders, with an
4027 * empty sibling list:
4028 */
4029 if (!group_leader)
4030 group_leader = counter;
4031
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004032 mutex_init(&counter->child_mutex);
4033 INIT_LIST_HEAD(&counter->child_list);
4034
Ingo Molnar04289bb2008-12-11 08:38:42 +01004035 INIT_LIST_HEAD(&counter->list_entry);
Peter Zijlstra592903c2009-03-13 12:21:36 +01004036 INIT_LIST_HEAD(&counter->event_entry);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004037 INIT_LIST_HEAD(&counter->sibling_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004038 init_waitqueue_head(&counter->waitq);
4039
Peter Zijlstra7b732a72009-03-23 18:22:10 +01004040 mutex_init(&counter->mmap_mutex);
4041
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004042 counter->cpu = cpu;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004043 counter->attr = *attr;
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004044 counter->group_leader = group_leader;
4045 counter->pmu = NULL;
4046 counter->ctx = ctx;
4047 counter->oncpu = -1;
Ingo Molnar329d8762009-05-26 08:10:00 +02004048
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004049 counter->parent = parent_counter;
4050
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004051 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
4052 counter->id = atomic64_inc_return(&perf_counter_id);
4053
4054 counter->state = PERF_COUNTER_STATE_INACTIVE;
4055
Peter Zijlstra0d486962009-06-02 19:22:16 +02004056 if (attr->disabled)
Ingo Molnara86ed502008-12-17 00:43:10 +01004057 counter->state = PERF_COUNTER_STATE_OFF;
4058
Robert Richter4aeb0b42009-04-29 12:47:03 +02004059 pmu = NULL;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004060
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004061 hwc = &counter->hw;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004062 hwc->sample_period = attr->sample_period;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004063 if (attr->freq && attr->sample_freq)
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004064 hwc->sample_period = 1;
4065
4066 atomic64_set(&hwc->period_left, hwc->sample_period);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004067
Peter Zijlstra2023b352009-05-05 17:50:26 +02004068 /*
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02004069 * we currently do not support PERF_FORMAT_GROUP on inherited counters
Peter Zijlstra2023b352009-05-05 17:50:26 +02004070 */
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02004071 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
Peter Zijlstra2023b352009-05-05 17:50:26 +02004072 goto done;
4073
Ingo Molnara21ca2c2009-06-06 09:58:57 +02004074 switch (attr->type) {
Peter Zijlstra081fad82009-06-11 17:57:21 +02004075 case PERF_TYPE_RAW:
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004076 case PERF_TYPE_HARDWARE:
Ingo Molnar8326f442009-06-05 20:22:46 +02004077 case PERF_TYPE_HW_CACHE:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004078 pmu = hw_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004079 break;
4080
4081 case PERF_TYPE_SOFTWARE:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004082 pmu = sw_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004083 break;
4084
4085 case PERF_TYPE_TRACEPOINT:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004086 pmu = tp_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004087 break;
Peter Zijlstra974802e2009-06-12 12:46:55 +02004088
4089 default:
4090 break;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004091 }
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01004092done:
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004093 err = 0;
Robert Richter4aeb0b42009-04-29 12:47:03 +02004094 if (!pmu)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004095 err = -EINVAL;
Robert Richter4aeb0b42009-04-29 12:47:03 +02004096 else if (IS_ERR(pmu))
4097 err = PTR_ERR(pmu);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004098
4099 if (err) {
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004100 if (counter->ns)
4101 put_pid_ns(counter->ns);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004102 kfree(counter);
4103 return ERR_PTR(err);
4104 }
4105
Robert Richter4aeb0b42009-04-29 12:47:03 +02004106 counter->pmu = pmu;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004107
Peter Zijlstraf3440112009-06-22 13:58:35 +02004108 if (!counter->parent) {
4109 atomic_inc(&nr_counters);
4110 if (counter->attr.mmap)
4111 atomic_inc(&nr_mmap_counters);
4112 if (counter->attr.comm)
4113 atomic_inc(&nr_comm_counters);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004114 if (counter->attr.task)
4115 atomic_inc(&nr_task_counters);
Peter Zijlstraf3440112009-06-22 13:58:35 +02004116 }
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02004117
Thomas Gleixner0793a612008-12-04 20:12:29 +01004118 return counter;
4119}
4120
Peter Zijlstra974802e2009-06-12 12:46:55 +02004121static int perf_copy_attr(struct perf_counter_attr __user *uattr,
4122 struct perf_counter_attr *attr)
4123{
4124 int ret;
4125 u32 size;
4126
4127 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4128 return -EFAULT;
4129
4130 /*
4131	 * Zero the full structure, so that a short copy leaves the tail zeroed.
4132 */
4133 memset(attr, 0, sizeof(*attr));
4134
4135 ret = get_user(size, &uattr->size);
4136 if (ret)
4137 return ret;
4138
4139 if (size > PAGE_SIZE) /* silly large */
4140 goto err_size;
4141
4142 if (!size) /* abi compat */
4143 size = PERF_ATTR_SIZE_VER0;
4144
4145 if (size < PERF_ATTR_SIZE_VER0)
4146 goto err_size;
4147
4148 /*
4149 * If we're handed a bigger struct than we know of,
4150 * ensure all the unknown bits are 0.
4151 */
4152 if (size > sizeof(*attr)) {
4153 unsigned long val;
4154 unsigned long __user *addr;
4155 unsigned long __user *end;
4156
4157 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
4158 sizeof(unsigned long));
4159 end = PTR_ALIGN((void __user *)uattr + size,
4160 sizeof(unsigned long));
4161
4162		for (; addr < end; addr++) {
4163 ret = get_user(val, addr);
4164 if (ret)
4165 return ret;
4166 if (val)
4167 goto err_size;
4168 }
4169 }
4170
4171 ret = copy_from_user(attr, uattr, size);
4172 if (ret)
4173 return -EFAULT;
4174
4175 /*
4176	 * If the type is known, the type-specific counter init will
4177	 * verify attr->config.
4178 */
4179 if (attr->type >= PERF_TYPE_MAX)
4180 return -EINVAL;
4181
4182 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
4183 return -EINVAL;
4184
4185 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4186 return -EINVAL;
4187
4188 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4189 return -EINVAL;
4190
4191out:
4192 return ret;
4193
4194err_size:
4195 put_user(sizeof(*attr), &uattr->size);
4196 ret = -E2BIG;
4197 goto out;
4198}
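
/*
 * User-space sketch of the size handshake implemented above (hypothetical
 * snippet, not part of this file): pass the struct size the binary was
 * built against; on -E2BIG the kernel has written the size it supports
 * back into attr.size, so the caller can zero the fields the kernel does
 * not know about and retry:
 *
 *	attr.size = sizeof(attr);
 *	fd = syscall(__NR_perf_counter_open, &attr, pid, cpu, group_fd, 0);
 *	if (fd < 0 && errno == E2BIG) {
 *		memset((char *)&attr + attr.size, 0,
 *		       sizeof(attr) - attr.size);
 *		fd = syscall(__NR_perf_counter_open, &attr, pid, cpu,
 *			     group_fd, 0);
 *	}
 */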
4199
Thomas Gleixner0793a612008-12-04 20:12:29 +01004200/**
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004201 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
Ingo Molnar9f66a382008-12-10 12:33:23 +01004202 *
Peter Zijlstra0d486962009-06-02 19:22:16 +02004203 * @attr_uptr: event type attributes for monitoring/sampling
Thomas Gleixner0793a612008-12-04 20:12:29 +01004204 * @pid: target pid
Ingo Molnar9f66a382008-12-10 12:33:23 +01004205 * @cpu: target cpu
4206 * @group_fd: group leader counter fd
Thomas Gleixner0793a612008-12-04 20:12:29 +01004207 */
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004208SYSCALL_DEFINE5(perf_counter_open,
Peter Zijlstra974802e2009-06-12 12:46:55 +02004209 struct perf_counter_attr __user *, attr_uptr,
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004210 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004211{
Ingo Molnar04289bb2008-12-11 08:38:42 +01004212 struct perf_counter *counter, *group_leader;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004213 struct perf_counter_attr attr;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004214 struct perf_counter_context *ctx;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004215 struct file *counter_file = NULL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004216 struct file *group_file = NULL;
4217 int fput_needed = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004218 int fput_needed2 = 0;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004219 int ret;
4220
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004221 /* for future expandability... */
4222 if (flags)
4223 return -EINVAL;
4224
Peter Zijlstra974802e2009-06-12 12:46:55 +02004225 ret = perf_copy_attr(attr_uptr, &attr);
4226 if (ret)
4227 return ret;
Thomas Gleixnereab656a2008-12-08 19:26:59 +01004228
Peter Zijlstra07647712009-06-11 11:18:36 +02004229 if (!attr.exclude_kernel) {
4230 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4231 return -EACCES;
4232 }
4233
Peter Zijlstradf58ab22009-06-11 11:25:05 +02004234 if (attr.freq) {
4235 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
4236 return -EINVAL;
4237 }
4238
Ingo Molnar04289bb2008-12-11 08:38:42 +01004239 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01004240 * Get the target context (task or percpu):
4241 */
4242 ctx = find_get_context(pid, cpu);
4243 if (IS_ERR(ctx))
4244 return PTR_ERR(ctx);
4245
4246 /*
4247 * Look up the group leader (we will attach this counter to it):
Ingo Molnar04289bb2008-12-11 08:38:42 +01004248 */
4249 group_leader = NULL;
4250 if (group_fd != -1) {
4251 ret = -EINVAL;
4252 group_file = fget_light(group_fd, &fput_needed);
4253 if (!group_file)
Ingo Molnarccff2862008-12-11 11:26:29 +01004254 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004255 if (group_file->f_op != &perf_fops)
Ingo Molnarccff2862008-12-11 11:26:29 +01004256 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004257
4258 group_leader = group_file->private_data;
4259 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01004260 * Do not allow a recursive hierarchy (this new sibling
4261 * becoming part of another group-sibling):
Ingo Molnar04289bb2008-12-11 08:38:42 +01004262 */
Ingo Molnarccff2862008-12-11 11:26:29 +01004263 if (group_leader->group_leader != group_leader)
4264 goto err_put_context;
4265 /*
4266 * Do not allow to attach to a group in a different
4267 * task or CPU context:
4268 */
4269 if (group_leader->ctx != ctx)
4270 goto err_put_context;
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11004271 /*
4272 * Only a group leader can be exclusive or pinned
4273 */
Peter Zijlstra0d486962009-06-02 19:22:16 +02004274 if (attr.exclusive || attr.pinned)
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11004275 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004276 }
4277
Peter Zijlstra0d486962009-06-02 19:22:16 +02004278 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004279 NULL, GFP_KERNEL);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004280 ret = PTR_ERR(counter);
4281 if (IS_ERR(counter))
Thomas Gleixner0793a612008-12-04 20:12:29 +01004282 goto err_put_context;
4283
Thomas Gleixner0793a612008-12-04 20:12:29 +01004284 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
4285 if (ret < 0)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004286 goto err_free_put_context;
4287
4288 counter_file = fget_light(ret, &fput_needed2);
4289 if (!counter_file)
4290 goto err_free_put_context;
4291
4292 counter->filp = counter_file;
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004293 WARN_ON_ONCE(ctx->parent_ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004294 mutex_lock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004295 perf_install_in_context(ctx, counter, cpu);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004296 ++ctx->generation;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004297 mutex_unlock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004298
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02004299 counter->owner = current;
4300 get_task_struct(current);
4301 mutex_lock(&current->perf_counter_mutex);
4302 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
4303 mutex_unlock(&current->perf_counter_mutex);
4304
Ingo Molnar9b51f662008-12-12 13:49:45 +01004305 fput_light(counter_file, fput_needed2);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004306
Ingo Molnar04289bb2008-12-11 08:38:42 +01004307out_fput:
4308 fput_light(group_file, fput_needed);
4309
Thomas Gleixner0793a612008-12-04 20:12:29 +01004310 return ret;
4311
Ingo Molnar9b51f662008-12-12 13:49:45 +01004312err_free_put_context:
Thomas Gleixner0793a612008-12-04 20:12:29 +01004313 kfree(counter);
4314
4315err_put_context:
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004316 put_ctx(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004317
Ingo Molnar04289bb2008-12-11 08:38:42 +01004318 goto out_fput;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004319}
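
/*
 * Minimal user-space usage sketch (not part of this file; assumes the
 * exported perf_counter.h and that __NR_perf_counter_open exists for the
 * architecture - there is no libc wrapper for this syscall):
 *
 *	struct perf_counter_attr attr;
 *	unsigned long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *
 *	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *	if (fd >= 0) {
 *		... run the code to be measured ...
 *		read(fd, &count, sizeof(count));
 *		close(fd);
 *	}
 *
 * pid 0 selects the current task, cpu -1 any CPU, group_fd -1 makes the
 * counter its own group leader, and flags must currently be zero.
 */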
4320
Ingo Molnar9b51f662008-12-12 13:49:45 +01004321/*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004322 * inherit a counter from parent task to child task:
4323 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004324static struct perf_counter *
Ingo Molnar9b51f662008-12-12 13:49:45 +01004325inherit_counter(struct perf_counter *parent_counter,
4326 struct task_struct *parent,
4327 struct perf_counter_context *parent_ctx,
4328 struct task_struct *child,
Paul Mackerrasd859e292009-01-17 18:10:22 +11004329 struct perf_counter *group_leader,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004330 struct perf_counter_context *child_ctx)
4331{
4332 struct perf_counter *child_counter;
4333
Paul Mackerrasd859e292009-01-17 18:10:22 +11004334 /*
4335 * Instead of creating recursive hierarchies of counters,
4336 * we link inherited counters back to the original parent,
4337	 * which is guaranteed to have a filp that we use as the
4338	 * reference count:
4339 */
4340 if (parent_counter->parent)
4341 parent_counter = parent_counter->parent;
4342
Peter Zijlstra0d486962009-06-02 19:22:16 +02004343 child_counter = perf_counter_alloc(&parent_counter->attr,
Paul Mackerras23a185c2009-02-09 22:42:47 +11004344 parent_counter->cpu, child_ctx,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004345 group_leader, parent_counter,
4346 GFP_KERNEL);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004347 if (IS_ERR(child_counter))
4348 return child_counter;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004349 get_ctx(child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004350
4351 /*
Paul Mackerras564c2b22009-05-22 14:27:22 +10004352 * Make the child state follow the state of the parent counter,
Peter Zijlstra0d486962009-06-02 19:22:16 +02004353 * not its attr.disabled bit. We hold the parent's mutex,
Ingo Molnar22a4f652009-06-01 10:13:37 +02004354 * so we won't race with perf_counter_{en, dis}able_family.
Paul Mackerras564c2b22009-05-22 14:27:22 +10004355 */
4356 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
4357 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
4358 else
4359 child_counter->state = PERF_COUNTER_STATE_OFF;
4360
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004361 if (parent_counter->attr.freq)
4362 child_counter->hw.sample_period = parent_counter->hw.sample_period;
4363
Paul Mackerras564c2b22009-05-22 14:27:22 +10004364 /*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004365 * Link it up in the child's context:
4366 */
Paul Mackerras53cfbf52009-03-25 22:46:58 +11004367 add_counter_to_ctx(child_counter, child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004368
Ingo Molnar9b51f662008-12-12 13:49:45 +01004369 /*
4370 * Get a reference to the parent filp - we will fput it
4371 * when the child counter exits. This is safe to do because
4372 * we are in the parent and we know that the filp still
4373 * exists and has a nonzero count:
4374 */
4375 atomic_long_inc(&parent_counter->filp->f_count);
4376
Paul Mackerrasd859e292009-01-17 18:10:22 +11004377 /*
4378 * Link this into the parent counter's child list
4379 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004380 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004381 mutex_lock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004382 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004383 mutex_unlock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004384
4385 return child_counter;
4386}
4387
4388static int inherit_group(struct perf_counter *parent_counter,
4389 struct task_struct *parent,
4390 struct perf_counter_context *parent_ctx,
4391 struct task_struct *child,
4392 struct perf_counter_context *child_ctx)
4393{
4394 struct perf_counter *leader;
4395 struct perf_counter *sub;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004396 struct perf_counter *child_ctr;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004397
4398 leader = inherit_counter(parent_counter, parent, parent_ctx,
4399 child, NULL, child_ctx);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004400 if (IS_ERR(leader))
4401 return PTR_ERR(leader);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004402 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004403 child_ctr = inherit_counter(sub, parent, parent_ctx,
4404 child, leader, child_ctx);
4405 if (IS_ERR(child_ctr))
4406 return PTR_ERR(child_ctr);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004407 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004408 return 0;
4409}
4410
Paul Mackerrasd859e292009-01-17 18:10:22 +11004411static void sync_child_counter(struct perf_counter *child_counter,
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004412 struct task_struct *child)
Paul Mackerrasd859e292009-01-17 18:10:22 +11004413{
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004414 struct perf_counter *parent_counter = child_counter->parent;
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004415 u64 child_val;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004416
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02004417 if (child_counter->attr.inherit_stat)
4418 perf_counter_read_event(child_counter, child);
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004419
Paul Mackerrasd859e292009-01-17 18:10:22 +11004420 child_val = atomic64_read(&child_counter->count);
4421
4422 /*
4423 * Add back the child's count to the parent's count:
4424 */
4425 atomic64_add(child_val, &parent_counter->count);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11004426 atomic64_add(child_counter->total_time_enabled,
4427 &parent_counter->child_total_time_enabled);
4428 atomic64_add(child_counter->total_time_running,
4429 &parent_counter->child_total_time_running);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004430
4431 /*
4432 * Remove this counter from the parent's list
4433 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004434 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004435 mutex_lock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004436 list_del_init(&child_counter->child_list);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004437 mutex_unlock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004438
4439 /*
4440 * Release the parent counter, if this was the last
4441 * reference to it.
4442 */
4443 fput(parent_counter->filp);
4444}
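
/*
 * Net effect (sketch, following directly from the additions above): once
 * every inherited child has exited, the parent's totals cover the parent
 * itself plus all of its children, e.g. for two children c1 and c2:
 *
 *	parent->count                    += c1->count + c2->count
 *	parent->child_total_time_enabled  = c1->total_time_enabled +
 *					    c2->total_time_enabled
 *	parent->child_total_time_running  = c1->total_time_running +
 *					    c2->total_time_running
 */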
4445
Ingo Molnar9b51f662008-12-12 13:49:45 +01004446static void
Peter Zijlstrabbbee902009-05-29 14:25:58 +02004447__perf_counter_exit_task(struct perf_counter *child_counter,
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004448 struct perf_counter_context *child_ctx,
4449 struct task_struct *child)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004450{
4451 struct perf_counter *parent_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004452
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004453 update_counter_times(child_counter);
Peter Zijlstraaa9c67f2009-05-23 18:28:59 +02004454 perf_counter_remove_from_context(child_counter);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01004455
Ingo Molnar9b51f662008-12-12 13:49:45 +01004456 parent_counter = child_counter->parent;
4457 /*
4458 * It can happen that parent exits first, and has counters
4459 * that are still around due to the child reference. These
4460 * counters need to be zapped - but otherwise linger.
4461 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004462 if (parent_counter) {
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004463 sync_child_counter(child_counter, child);
Peter Zijlstraf1600952009-03-19 20:26:16 +01004464 free_counter(child_counter);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01004465 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004466}
4467
4468/*
Paul Mackerrasd859e292009-01-17 18:10:22 +11004469 * When a child task exits, feed back counter values to parent counters.
Ingo Molnar9b51f662008-12-12 13:49:45 +01004470 */
4471void perf_counter_exit_task(struct task_struct *child)
4472{
4473 struct perf_counter *child_counter, *tmp;
4474 struct perf_counter_context *child_ctx;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004475 unsigned long flags;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004476
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004477 if (likely(!child->perf_counter_ctxp)) {
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004478 perf_counter_task(child, NULL, 0);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004479 return;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004480 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004481
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004482 local_irq_save(flags);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004483 /*
4484 * We can't reschedule here because interrupts are disabled,
4485 * and either child is current or it is a task that can't be
4486 * scheduled, so we are now safe from rescheduling changing
4487 * our context.
4488 */
4489 child_ctx = child->perf_counter_ctxp;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004490 __perf_counter_task_sched_out(child_ctx);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004491
4492 /*
4493 * Take the context lock here so that if find_get_context is
4494 * reading child->perf_counter_ctxp, we wait until it has
4495 * incremented the context's refcount before we do put_ctx below.
4496 */
4497 spin_lock(&child_ctx->lock);
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004498 child->perf_counter_ctxp = NULL;
Peter Zijlstra71a851b2009-07-10 09:06:56 +02004499 /*
4500 * If this context is a clone; unclone it so it can't get
4501 * swapped to another process while we're removing all
4502 * the counters from it.
4503 */
4504 unclone_ctx(child_ctx);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004505 spin_unlock_irqrestore(&child_ctx->lock, flags);
4506
4507 /*
4508 * Report the task dead after unscheduling the counters so that we
4509 * won't get any samples after PERF_EVENT_EXIT. We can however still
4510 * get a few PERF_EVENT_READ events.
4511 */
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004512 perf_counter_task(child, child_ctx, 0);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004513
Peter Zijlstra66fff222009-06-10 22:53:37 +02004514 /*
4515 * We can recurse on the same lock type through:
4516 *
4517 * __perf_counter_exit_task()
4518 * sync_child_counter()
4519 * fput(parent_counter->filp)
4520 * perf_release()
4521 * mutex_lock(&ctx->mutex)
4522 *
4523	 * But since it's the parent context it won't be the same instance.
4524 */
4525 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004526
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004527again:
Ingo Molnar9b51f662008-12-12 13:49:45 +01004528 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4529 list_entry)
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004530 __perf_counter_exit_task(child_counter, child_ctx, child);
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004531
4532 /*
4533 * If the last counter was a group counter, it will have appended all
4534 * its siblings to the list, but we obtained 'tmp' before that which
4535 * will still point to the list head terminating the iteration.
4536 */
4537 if (!list_empty(&child_ctx->counter_list))
4538 goto again;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004539
4540 mutex_unlock(&child_ctx->mutex);
4541
4542 put_ctx(child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004543}
4544
4545/*
Peter Zijlstrabbbee902009-05-29 14:25:58 +02004546 * Free an unexposed, unused context, as created by inheritance in
4547 * perf_counter_init_task() below; used by fork() in case of failure.
4548 */
4549void perf_counter_free_task(struct task_struct *task)
4550{
4551 struct perf_counter_context *ctx = task->perf_counter_ctxp;
4552 struct perf_counter *counter, *tmp;
4553
4554 if (!ctx)
4555 return;
4556
4557 mutex_lock(&ctx->mutex);
4558again:
4559 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4560 struct perf_counter *parent = counter->parent;
4561
4562 if (WARN_ON_ONCE(!parent))
4563 continue;
4564
4565 mutex_lock(&parent->child_mutex);
4566 list_del_init(&counter->child_list);
4567 mutex_unlock(&parent->child_mutex);
4568
4569 fput(parent->filp);
4570
4571 list_del_counter(counter, ctx);
4572 free_counter(counter);
4573 }
4574
4575 if (!list_empty(&ctx->counter_list))
4576 goto again;
4577
4578 mutex_unlock(&ctx->mutex);
4579
4580 put_ctx(ctx);
4581}
4582
4583/*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004584 * Initialize the perf_counter context in task_struct
4585 */
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004586int perf_counter_init_task(struct task_struct *child)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004587{
4588 struct perf_counter_context *child_ctx, *parent_ctx;
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004589 struct perf_counter_context *cloned_ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004590 struct perf_counter *counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004591 struct task_struct *parent = current;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004592 int inherited_all = 1;
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004593 int ret = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004594
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004595 child->perf_counter_ctxp = NULL;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004596
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02004597 mutex_init(&child->perf_counter_mutex);
4598 INIT_LIST_HEAD(&child->perf_counter_list);
4599
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004600 if (likely(!parent->perf_counter_ctxp))
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004601 return 0;
4602
Ingo Molnar9b51f662008-12-12 13:49:45 +01004603 /*
4604 * This is executed from the parent task context, so inherit
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004605 * counters that have been marked for cloning.
4606 * First allocate and initialize a context for the child.
Ingo Molnar9b51f662008-12-12 13:49:45 +01004607 */
4608
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004609 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4610 if (!child_ctx)
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004611 return -ENOMEM;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004612
4613 __perf_counter_init_context(child_ctx, child);
4614 child->perf_counter_ctxp = child_ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004615 get_task_struct(child);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004616
Ingo Molnar9b51f662008-12-12 13:49:45 +01004617 /*
Paul Mackerras25346b92009-06-01 17:48:12 +10004618 * If the parent's context is a clone, pin it so it won't get
4619 * swapped under us.
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004620 */
Paul Mackerras25346b92009-06-01 17:48:12 +10004621 parent_ctx = perf_pin_task_context(parent);
4622
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004623 /*
4624 * No need to check if parent_ctx != NULL here; since we saw
4625 * it non-NULL earlier, the only reason for it to become NULL
4626 * is if we exit, and since we're currently in the middle of
4627 * a fork we can't be exiting at the same time.
4628 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004629
4630 /*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004631 * Lock the parent list. No need to lock the child - not PID
4632 * hashed yet and not running, so nobody can access it.
4633 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004634 mutex_lock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004635
4636 /*
4637	 * We don't have to disable NMIs - we are only looking at
4638 * the list, not manipulating it:
4639 */
Peter Zijlstrad7b629a2009-05-20 12:21:19 +02004640 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4641 if (counter != counter->group_leader)
4642 continue;
4643
Peter Zijlstra0d486962009-06-02 19:22:16 +02004644 if (!counter->attr.inherit) {
Paul Mackerras564c2b22009-05-22 14:27:22 +10004645 inherited_all = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004646 continue;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004647 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004648
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004649 ret = inherit_group(counter, parent, parent_ctx,
4650 child, child_ctx);
4651 if (ret) {
Paul Mackerras564c2b22009-05-22 14:27:22 +10004652 inherited_all = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004653 break;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004654 }
4655 }
4656
4657 if (inherited_all) {
4658 /*
4659 * Mark the child context as a clone of the parent
4660 * context, or of whatever the parent is a clone of.
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004661 * Note that if the parent is a clone, it could get
4662 * uncloned at any point, but that doesn't matter
4663 * because the list of counters and the generation
4664 * count can't have changed since we took the mutex.
Paul Mackerras564c2b22009-05-22 14:27:22 +10004665 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004666 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4667 if (cloned_ctx) {
4668 child_ctx->parent_ctx = cloned_ctx;
Paul Mackerras25346b92009-06-01 17:48:12 +10004669 child_ctx->parent_gen = parent_ctx->parent_gen;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004670 } else {
4671 child_ctx->parent_ctx = parent_ctx;
4672 child_ctx->parent_gen = parent_ctx->generation;
4673 }
4674 get_ctx(child_ctx->parent_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004675 }
4676
Paul Mackerrasd859e292009-01-17 18:10:22 +11004677 mutex_unlock(&parent_ctx->mutex);
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004678
Paul Mackerras25346b92009-06-01 17:48:12 +10004679 perf_unpin_context(parent_ctx);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004680
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004681 return ret;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004682}
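
/*
 * Rough lifecycle of the inherited-context machinery above (sketch; the
 * hook points live outside this file, in the fork and exit paths):
 *
 *	copy_process()
 *		perf_counter_init_task(child)	clone inheritable counters
 *		perf_counter_free_task(child)	only if copy_process() fails
 *	...
 *	do_exit()
 *		perf_counter_exit_task(child)	fold counts back into parents
 */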
4683
Ingo Molnar04289bb2008-12-11 08:38:42 +01004684static void __cpuinit perf_counter_init_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004685{
Ingo Molnar04289bb2008-12-11 08:38:42 +01004686 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004687
Ingo Molnar04289bb2008-12-11 08:38:42 +01004688 cpuctx = &per_cpu(perf_cpu_context, cpu);
4689 __perf_counter_init_context(&cpuctx->ctx, NULL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004690
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004691 spin_lock(&perf_resource_lock);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004692 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004693 spin_unlock(&perf_resource_lock);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004694
Paul Mackerras01d02872009-01-14 13:44:19 +11004695 hw_perf_counter_setup(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004696}
4697
4698#ifdef CONFIG_HOTPLUG_CPU
Ingo Molnar04289bb2008-12-11 08:38:42 +01004699static void __perf_counter_exit_cpu(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004700{
4701 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4702 struct perf_counter_context *ctx = &cpuctx->ctx;
4703 struct perf_counter *counter, *tmp;
4704
Ingo Molnar04289bb2008-12-11 08:38:42 +01004705 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4706 __perf_counter_remove_from_context(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004707}
Ingo Molnar04289bb2008-12-11 08:38:42 +01004708static void perf_counter_exit_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004709{
Paul Mackerrasd859e292009-01-17 18:10:22 +11004710 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4711 struct perf_counter_context *ctx = &cpuctx->ctx;
4712
4713 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004714 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004715 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004716}
4717#else
Ingo Molnar04289bb2008-12-11 08:38:42 +01004718static inline void perf_counter_exit_cpu(int cpu) { }
Thomas Gleixner0793a612008-12-04 20:12:29 +01004719#endif
4720
4721static int __cpuinit
4722perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4723{
4724 unsigned int cpu = (long)hcpu;
4725
4726 switch (action) {
4727
4728 case CPU_UP_PREPARE:
4729 case CPU_UP_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004730 perf_counter_init_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004731 break;
4732
Ingo Molnar28402972009-08-13 10:13:22 +02004733 case CPU_ONLINE:
4734 case CPU_ONLINE_FROZEN:
4735 hw_perf_counter_setup_online(cpu);
4736 break;
4737
Thomas Gleixner0793a612008-12-04 20:12:29 +01004738 case CPU_DOWN_PREPARE:
4739 case CPU_DOWN_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004740 perf_counter_exit_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004741 break;
4742
4743 default:
4744 break;
4745 }
4746
4747 return NOTIFY_OK;
4748}
4749
Paul Mackerrasf38b0822009-06-02 21:05:16 +10004750/*
4751 * This has to have a higher priority than migration_notifier in sched.c.
4752 */
Thomas Gleixner0793a612008-12-04 20:12:29 +01004753static struct notifier_block __cpuinitdata perf_cpu_nb = {
4754 .notifier_call = perf_cpu_notify,
Paul Mackerrasf38b0822009-06-02 21:05:16 +10004755 .priority = 20,
Thomas Gleixner0793a612008-12-04 20:12:29 +01004756};
4757
Ingo Molnar0d905bc2009-05-04 19:13:30 +02004758void __init perf_counter_init(void)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004759{
4760 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4761 (void *)(long)smp_processor_id());
Ingo Molnar28402972009-08-13 10:13:22 +02004762 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4763 (void *)(long)smp_processor_id());
Thomas Gleixner0793a612008-12-04 20:12:29 +01004764 register_cpu_notifier(&perf_cpu_nb);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004765}
Thomas Gleixner0793a612008-12-04 20:12:29 +01004766
4767static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4768{
4769 return sprintf(buf, "%d\n", perf_reserved_percpu);
4770}
4771
4772static ssize_t
4773perf_set_reserve_percpu(struct sysdev_class *class,
4774 const char *buf,
4775 size_t count)
4776{
4777 struct perf_cpu_context *cpuctx;
4778 unsigned long val;
4779 int err, cpu, mpt;
4780
4781 err = strict_strtoul(buf, 10, &val);
4782 if (err)
4783 return err;
4784 if (val > perf_max_counters)
4785 return -EINVAL;
4786
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004787 spin_lock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004788 perf_reserved_percpu = val;
4789 for_each_online_cpu(cpu) {
4790 cpuctx = &per_cpu(perf_cpu_context, cpu);
4791 spin_lock_irq(&cpuctx->ctx.lock);
4792 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4793 perf_max_counters - perf_reserved_percpu);
4794 cpuctx->max_pertask = mpt;
4795 spin_unlock_irq(&cpuctx->ctx.lock);
4796 }
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004797 spin_unlock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004798
4799 return count;
4800}
4801
4802static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4803{
4804 return sprintf(buf, "%d\n", perf_overcommit);
4805}
4806
4807static ssize_t
4808perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4809{
4810 unsigned long val;
4811 int err;
4812
4813 err = strict_strtoul(buf, 10, &val);
4814 if (err)
4815 return err;
4816 if (val > 1)
4817 return -EINVAL;
4818
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004819 spin_lock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004820 perf_overcommit = val;
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004821 spin_unlock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004822
4823 return count;
4824}
4825
4826static SYSDEV_CLASS_ATTR(
4827 reserve_percpu,
4828 0644,
4829 perf_show_reserve_percpu,
4830 perf_set_reserve_percpu
4831 );
4832
4833static SYSDEV_CLASS_ATTR(
4834 overcommit,
4835 0644,
4836 perf_show_overcommit,
4837 perf_set_overcommit
4838 );
4839
4840static struct attribute *perfclass_attrs[] = {
4841 &attr_reserve_percpu.attr,
4842 &attr_overcommit.attr,
4843 NULL
4844};
4845
4846static struct attribute_group perfclass_attr_group = {
4847 .attrs = perfclass_attrs,
4848 .name = "perf_counters",
4849};
4850
4851static int __init perf_counter_sysfs_init(void)
4852{
4853 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4854 &perfclass_attr_group);
4855}
4856device_initcall(perf_counter_sysfs_init);
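
/*
 * The attributes above are registered against the cpu sysdev class, so
 * (path assumed from cpu_sysdev_class) they are expected to appear as:
 *
 *	/sys/devices/system/cpu/perf_counters/reserve_percpu
 *	/sys/devices/system/cpu/perf_counters/overcommit
 *
 * e.g.:
 *
 *	# echo 2 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	# echo 0 > /sys/devices/system/cpu/perf_counters/overcommit
 *
 * reserve_percpu caps each CPU's max_pertask at perf_max_counters minus
 * the reservation; values above perf_max_counters (or above 1 for
 * overcommit) are rejected with -EINVAL.
 */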