/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
static atomic_t nr_task_counters __read_mostly;

/*
 * perf counter paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu counters for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_counter_paranoid __read_mostly = 1;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_counter_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_counter_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_counter_paranoid > 1;
}

int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf counter sample rate
 */
int sysctl_perf_counter_sample_rate __read_mostly = 100000;

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);
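/*
 * disable_count nests perf_disable()/perf_enable() calls on each CPU;
 * the PMU is only re-enabled by hw_perf_enable() once the count drops
 * back to zero.
 */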

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

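/*
 * Context reference counting: contexts are freed through RCU (free_ctx)
 * once put_ctx() drops the last reference.
 */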
static void get_ctx(struct perf_counter_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_counter_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit counters we want to return the parent counter id
 * to userspace.
 */
static u64 primary_counter_id(struct perf_counter *counter)
{
	u64 id = counter->id;

	if (counter->parent)
		id = counter->parent->id;

	return id;
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
	if (counter->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;
	if (counter->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

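/*
 * Take an ACTIVE counter off the PMU: stamp its stop time, mark it
 * INACTIVE (or OFF if a lazy disable was pending) and update the
 * context's active/exclusive bookkeeping.
 */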
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (counter->pending_disable) {
		counter->pending_disable = 0;
		counter->state = PERF_COUNTER_STATE_OFF;
	}
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_group_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

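/*
 * Put a single counter on the PMU; returns -EAGAIN and leaves the
 * counter INACTIVE if the hardware refuses it.
 */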
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

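/*
 * Put a whole counter group on the PMU, leader first.  If any sibling
 * fails to go on, the partially scheduled group is backed out again.
 */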
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

Thomas Gleixner0793a612008-12-04 20:12:29 +0100670/*
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100671 * Return 1 for a group consisting entirely of software counters,
672 * 0 if the group contains any hardware counters.
673 */
674static int is_software_only_group(struct perf_counter *leader)
675{
676 struct perf_counter *counter;
677
678 if (!is_software_counter(leader))
679 return 0;
Peter Zijlstra5c148192009-03-25 12:30:23 +0100680
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100681 list_for_each_entry(counter, &leader->sibling_list, list_entry)
682 if (!is_software_counter(counter))
683 return 0;
Peter Zijlstra5c148192009-03-25 12:30:23 +0100684
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100685 return 1;
686}
687
688/*
689 * Work out whether we can put this counter group on the CPU now.
690 */
691static int group_can_go_on(struct perf_counter *counter,
692 struct perf_cpu_context *cpuctx,
693 int can_add_hw)
694{
695 /*
696 * Groups consisting entirely of software counters can always go on.
697 */
698 if (is_software_only_group(counter))
699 return 1;
700 /*
701 * If an exclusive group is already on, no other hardware
702 * counters can go on.
703 */
704 if (cpuctx->exclusive)
705 return 0;
706 /*
707 * If this group is exclusive and there are already
708 * counters on the CPU, it can't go on.
709 */
Peter Zijlstra0d486962009-06-02 19:22:16 +0200710 if (counter->attr.exclusive && cpuctx->active_oncpu)
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100711 return 0;
712 /*
713 * Otherwise, try to add it if all previous groups were able
714 * to go on.
715 */
716 return can_add_hw;
717}
718
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100719static void add_counter_to_ctx(struct perf_counter *counter,
720 struct perf_counter_context *ctx)
721{
722 list_add_counter(counter, ctx);
Peter Zijlstra4af49982009-04-06 11:45:10 +0200723 counter->tstamp_enabled = ctx->time;
724 counter->tstamp_running = ctx->time;
725 counter->tstamp_stopped = ctx->time;
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100726}
727
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100728/*
Ingo Molnar235c7fc2008-12-21 14:43:25 +0100729 * Cross CPU call to install and enable a performance counter
Peter Zijlstra682076a2009-05-23 18:28:57 +0200730 *
731 * Must be called with ctx->mutex held
Thomas Gleixner0793a612008-12-04 20:12:29 +0100732 */
733static void __perf_install_in_context(void *info)
734{
735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
736 struct perf_counter *counter = info;
737 struct perf_counter_context *ctx = counter->ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +1100738 struct perf_counter *leader = counter->group_leader;
Thomas Gleixner0793a612008-12-04 20:12:29 +0100739 int cpu = smp_processor_id();
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100740 int err;
Thomas Gleixner0793a612008-12-04 20:12:29 +0100741
742 /*
743 * If this is a task context, we need to check whether it is
744 * the current task context of this cpu. If not it has been
745 * scheduled out before the smp call arrived.
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000746 * Or possibly this is the right context but it isn't
747 * on this cpu because it had no counters.
Thomas Gleixner0793a612008-12-04 20:12:29 +0100748 */
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000749 if (ctx->task && cpuctx->task_ctx != ctx) {
Peter Zijlstra665c2142009-05-29 14:51:57 +0200750 if (cpuctx->task_ctx || ctx->task != current)
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000751 return;
752 cpuctx->task_ctx = ctx;
753 }
Thomas Gleixner0793a612008-12-04 20:12:29 +0100754
Ingo Molnar3f4dee22009-05-29 11:25:09 +0200755 spin_lock(&ctx->lock);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000756 ctx->is_active = 1;
Peter Zijlstra4af49982009-04-06 11:45:10 +0200757 update_context_time(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +0100758
759 /*
760 * Protect the list operation against NMI by disabling the
761 * counters on a global level. NOP for non NMI based counters.
762 */
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200763 perf_disable();
Thomas Gleixner0793a612008-12-04 20:12:29 +0100764
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100765 add_counter_to_ctx(counter, ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +0100766
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100767 /*
Paul Mackerrasd859e292009-01-17 18:10:22 +1100768 * Don't put the counter on if it is disabled or if
769 * it is in a group and the group isn't on.
770 */
771 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
772 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
773 goto unlock;
774
775 /*
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100776 * An exclusive counter can't go on if there are already active
777 * hardware counters, and no hardware counter can go on if there
778 * is already an exclusive counter on.
779 */
Paul Mackerrasd859e292009-01-17 18:10:22 +1100780 if (!group_can_go_on(counter, cpuctx, 1))
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100781 err = -EEXIST;
782 else
783 err = counter_sched_in(counter, cpuctx, ctx, cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +0100784
Paul Mackerrasd859e292009-01-17 18:10:22 +1100785 if (err) {
786 /*
787 * This counter couldn't go on. If it is in a group
788 * then we have to pull the whole group off.
789 * If the counter group is pinned then put it in error state.
790 */
791 if (leader != counter)
792 group_sched_out(leader, cpuctx, ctx);
Peter Zijlstra0d486962009-06-02 19:22:16 +0200793 if (leader->attr.pinned) {
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100794 update_group_times(leader);
Paul Mackerrasd859e292009-01-17 18:10:22 +1100795 leader->state = PERF_COUNTER_STATE_ERROR;
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100796 }
Paul Mackerrasd859e292009-01-17 18:10:22 +1100797 }
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100798
799 if (!err && !ctx->task && cpuctx->max_pertask)
Thomas Gleixner0793a612008-12-04 20:12:29 +0100800 cpuctx->max_pertask--;
801
Paul Mackerrasd859e292009-01-17 18:10:22 +1100802 unlock:
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200803 perf_enable();
Ingo Molnar235c7fc2008-12-21 14:43:25 +0100804
Peter Zijlstra665c2142009-05-29 14:51:57 +0200805 spin_unlock(&ctx->lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +0100806}
807
/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}
859
Paul Mackerrasd859e292009-01-17 18:10:22 +1100860/*
Paul Mackerrasfa289be2009-08-25 15:17:20 +1000861 * Put a counter into inactive state and update time fields.
862 * Enabling the leader of a group effectively enables all
863 * the group members that aren't explicitly disabled, so we
864 * have to update their ->tstamp_enabled also.
865 * Note: this works for group members as well as group leaders
866 * since the non-leader members' sibling_lists will be empty.
867 */
868static void __perf_counter_mark_enabled(struct perf_counter *counter,
869 struct perf_counter_context *ctx)
870{
871 struct perf_counter *sub;
872
873 counter->state = PERF_COUNTER_STATE_INACTIVE;
874 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
875 list_for_each_entry(sub, &counter->sibling_list, list_entry)
876 if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
877 sub->tstamp_enabled =
878 ctx->time - sub->total_time_enabled;
879}
880
881/*
Paul Mackerrasd859e292009-01-17 18:10:22 +1100882 * Cross CPU call to enable a performance counter
883 */
884static void __perf_counter_enable(void *info)
Ingo Molnar04289bb2008-12-11 08:38:42 +0100885{
Paul Mackerrasd859e292009-01-17 18:10:22 +1100886 struct perf_counter *counter = info;
887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
888 struct perf_counter_context *ctx = counter->ctx;
889 struct perf_counter *leader = counter->group_leader;
Paul Mackerrasd859e292009-01-17 18:10:22 +1100890 int err;
Ingo Molnar04289bb2008-12-11 08:38:42 +0100891
892 /*
Paul Mackerrasd859e292009-01-17 18:10:22 +1100893 * If this is a per-task counter, need to check whether this
894 * counter's task is the current task on this cpu.
Ingo Molnar04289bb2008-12-11 08:38:42 +0100895 */
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000896 if (ctx->task && cpuctx->task_ctx != ctx) {
Peter Zijlstra665c2142009-05-29 14:51:57 +0200897 if (cpuctx->task_ctx || ctx->task != current)
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000898 return;
899 cpuctx->task_ctx = ctx;
900 }
Paul Mackerras3b6f9e52009-01-14 21:00:30 +1100901
Ingo Molnar3f4dee22009-05-29 11:25:09 +0200902 spin_lock(&ctx->lock);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +1000903 ctx->is_active = 1;
Peter Zijlstra4af49982009-04-06 11:45:10 +0200904 update_context_time(ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +1100905
906 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
907 goto unlock;
Paul Mackerrasfa289be2009-08-25 15:17:20 +1000908 __perf_counter_mark_enabled(counter, ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +1100909
910 /*
911 * If the counter is in a group and isn't the group leader,
912 * then don't put it on unless the group is on.
913 */
914 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
915 goto unlock;
916
Paul Mackerrase758a332009-05-12 21:59:01 +1000917 if (!group_can_go_on(counter, cpuctx, 1)) {
Paul Mackerrasd859e292009-01-17 18:10:22 +1100918 err = -EEXIST;
Paul Mackerrase758a332009-05-12 21:59:01 +1000919 } else {
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200920 perf_disable();
Paul Mackerrase758a332009-05-12 21:59:01 +1000921 if (counter == leader)
922 err = group_sched_in(counter, cpuctx, ctx,
923 smp_processor_id());
924 else
925 err = counter_sched_in(counter, cpuctx, ctx,
926 smp_processor_id());
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200927 perf_enable();
Paul Mackerrase758a332009-05-12 21:59:01 +1000928 }
Paul Mackerrasd859e292009-01-17 18:10:22 +1100929
930 if (err) {
931 /*
932 * If this counter can't go on and it's part of a
933 * group, then the whole group has to come off.
934 */
935 if (leader != counter)
936 group_sched_out(leader, cpuctx, ctx);
Peter Zijlstra0d486962009-06-02 19:22:16 +0200937 if (leader->attr.pinned) {
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100938 update_group_times(leader);
Paul Mackerrasd859e292009-01-17 18:10:22 +1100939 leader->state = PERF_COUNTER_STATE_ERROR;
Paul Mackerras53cfbf52009-03-25 22:46:58 +1100940 }
Paul Mackerrasd859e292009-01-17 18:10:22 +1100941 }
942
943 unlock:
Peter Zijlstra665c2142009-05-29 14:51:57 +0200944 spin_unlock(&ctx->lock);
Paul Mackerrasd859e292009-01-17 18:10:22 +1100945}
946
/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF)
		__perf_counter_mark_enabled(counter, ctx);

 out:
	spin_unlock_irq(&ctx->lock);
}

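/*
 * Bump the counter's overflow limit by @refresh and re-enable it, so it
 * can fire that many more times before being disabled again.
 */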
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

Thomas Gleixner0793a612008-12-04 20:12:29 +01001047/*
Paul Mackerras564c2b22009-05-22 14:27:22 +10001048 * Test whether two contexts are equivalent, i.e. whether they
1049 * have both been cloned from the same version of the same context
1050 * and they both have the same number of enabled counters.
1051 * If the number of enabled counters is the same, then the set
1052 * of enabled counters should be the same, because these are both
1053 * inherited contexts, therefore we can't access individual counters
1054 * in them directly with an fd; we can only enable/disable all
1055 * counters via prctl, or enable/disable all counters in a family
1056 * via ioctl, which will have the same effect on both contexts.
1057 */
1058static int context_equiv(struct perf_counter_context *ctx1,
1059 struct perf_counter_context *ctx2)
1060{
1061 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001062 && ctx1->parent_gen == ctx2->parent_gen
Paul Mackerras25346b932009-06-01 17:48:12 +10001063 && !ctx1->pin_count && !ctx2->pin_count;
Paul Mackerras564c2b22009-05-22 14:27:22 +10001064}
1065
static void __perf_counter_read(void *counter);

static void __perf_counter_sync_stat(struct perf_counter *counter,
				     struct perf_counter *next_counter)
{
	u64 value;

	if (!counter->attr.inherit_stat)
		return;

	/*
	 * Update the counter value, we cannot use perf_counter_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the counter must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (counter->state) {
	case PERF_COUNTER_STATE_ACTIVE:
		__perf_counter_read(counter);
		break;

	case PERF_COUNTER_STATE_INACTIVE:
		update_counter_times(counter);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the counter
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_counter->count);
	value = atomic64_xchg(&counter->count, value);
	atomic64_set(&next_counter->count, value);

	swap(counter->total_time_enabled, next_counter->total_time_enabled);
	swap(counter->total_time_running, next_counter->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_counter_update_userpage(counter);
	perf_counter_update_userpage(next_counter);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_counter_sync_stat(struct perf_counter_context *ctx,
				   struct perf_counter_context *next_ctx)
{
	struct perf_counter *counter, *next_counter;

	if (!ctx->nr_stat)
		return;

	counter = list_first_entry(&ctx->event_list,
				   struct perf_counter, event_entry);

	next_counter = list_first_entry(&next_ctx->event_list,
					struct perf_counter, event_entry);

	while (&counter->event_entry != &ctx->event_list &&
	       &next_counter->event_entry != &next_ctx->event_list) {

		__perf_counter_sync_stat(counter, next_counter);

		counter = list_next_entry(counter, event_entry);
		next_counter = list_next_entry(next_counter, event_entry);
	}
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt the rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_counter_sync_stat(ctx, next_ctx);
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

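/*
 * Schedule in the counters of @ctx on this CPU: pinned groups go on
 * first (and are marked ERROR if they cannot), then the remaining
 * groups for as long as the PMU has room.
 */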
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->attr.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);

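/*
 * Recompute the sample period from the observed rate (@events overflows
 * per second, each worth the current sample period) so that the counter
 * overflows about attr.sample_freq times per second; the adjustment is
 * low-pass filtered to avoid wild swings.
 */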
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001345static void perf_adjust_period(struct perf_counter *counter, u64 events)
1346{
1347 struct hw_perf_counter *hwc = &counter->hw;
1348 u64 period, sample_period;
1349 s64 delta;
1350
1351 events *= hwc->sample_period;
1352 period = div64_u64(events, counter->attr.sample_freq);
1353
1354 delta = (s64)(period - hwc->sample_period);
1355 delta = (delta + 7) / 8; /* low pass filter */
1356
1357 sample_period = hwc->sample_period + delta;
1358
1359 if (!sample_period)
1360 sample_period = 1;
1361
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001362 hwc->sample_period = sample_period;
1363}
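/*
 * Worked example (illustrative numbers): with hwc->sample_period = 10000,
 * an estimated rate of events = 200 interrupts/sec and
 * attr.sample_freq = 100, we count 200 * 10000 = 2000000 events/sec, so
 * the ideal period would be 2000000 / 100 = 20000.  The low pass filter
 * applies only 1/8th of the +10000 correction, moving sample_period to
 * 11250; successive ticks converge on 20000 without over-reacting to a
 * noisy rate estimate.
 */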
1364
1365static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001366{
1367 struct perf_counter *counter;
Peter Zijlstra6a24ed6c2009-06-05 18:01:29 +02001368 struct hw_perf_counter *hwc;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001369 u64 interrupts, freq;
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001370
1371 spin_lock(&ctx->lock);
1372 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1373 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1374 continue;
1375
Peter Zijlstra6a24ed6c2009-06-05 18:01:29 +02001376 hwc = &counter->hw;
1377
1378 interrupts = hwc->interrupts;
1379 hwc->interrupts = 0;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001380
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001381 /*
1382 * unthrottle counters on the tick
1383 */
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001384 if (interrupts == MAX_INTERRUPTS) {
1385 perf_log_throttle(counter, 1);
1386 counter->pmu->unthrottle(counter);
Peter Zijlstradf58ab22009-06-11 11:25:05 +02001387 interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001388 }
1389
Peter Zijlstra0d486962009-06-02 19:22:16 +02001390 if (!counter->attr.freq || !counter->attr.sample_freq)
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001391 continue;
1392
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001393 /*
1394 * if the specified freq < HZ then we need to skip ticks
1395 */
Peter Zijlstra6a24ed6c2009-06-05 18:01:29 +02001396 if (counter->attr.sample_freq < HZ) {
1397 freq = counter->attr.sample_freq;
1398
1399 hwc->freq_count += freq;
1400 hwc->freq_interrupts += interrupts;
1401
1402 if (hwc->freq_count < HZ)
1403 continue;
1404
1405 interrupts = hwc->freq_interrupts;
1406 hwc->freq_interrupts = 0;
1407 hwc->freq_count -= HZ;
1408 } else
1409 freq = HZ;
1410
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001411 perf_adjust_period(counter, freq * interrupts);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001412
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001413 /*
1414 * In order to avoid being stalled by an (accidental) huge
1415 * sample period, force reset the sample period if we didn't
1416 * get any events in this freq period.
1417 */
1418 if (!interrupts) {
1419 perf_disable();
1420 counter->pmu->disable(counter);
Paul Mackerras87847b82009-06-13 17:06:50 +10001421 atomic64_set(&hwc->period_left, 0);
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001422 counter->pmu->enable(counter);
1423 perf_enable();
1424 }
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001425 }
1426 spin_unlock(&ctx->lock);
1427}
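/*
 * Note on the skip-ticks path above (illustrative numbers): with HZ = 1000
 * and attr.sample_freq = 250, hwc->freq_count only reaches HZ every 4th
 * tick, so interrupts are accumulated over a full 1/250 s window and
 * freq * interrupts again approximates interrupts per second before it is
 * fed to perf_adjust_period().
 */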
1428
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001429/*
1430 * Round-robin a context's counters:
1431 */
1432static void rotate_ctx(struct perf_counter_context *ctx)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001433{
Thomas Gleixner0793a612008-12-04 20:12:29 +01001434 struct perf_counter *counter;
1435
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001436 if (!ctx->nr_counters)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001437 return;
1438
Thomas Gleixner0793a612008-12-04 20:12:29 +01001439 spin_lock(&ctx->lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001440 /*
Ingo Molnar04289bb2008-12-11 08:38:42 +01001441 * Rotate the first entry last (works just fine for group counters too):
Thomas Gleixner0793a612008-12-04 20:12:29 +01001442 */
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001443 perf_disable();
Ingo Molnar04289bb2008-12-11 08:38:42 +01001444 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
Peter Zijlstra75564232009-03-13 12:21:29 +01001445 list_move_tail(&counter->list_entry, &ctx->counter_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001446 break;
1447 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001448 perf_enable();
Thomas Gleixner0793a612008-12-04 20:12:29 +01001449
1450 spin_unlock(&ctx->lock);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001451}
Thomas Gleixner0793a612008-12-04 20:12:29 +01001452
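/*
 * Called from the scheduler tick: re-estimate sample periods for
 * frequency based counters, schedule everything out, rotate both the
 * per-CPU and the task context, then schedule back in so that counters
 * which could not get onto the PMU last time get their round-robin turn.
 */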
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001453void perf_counter_task_tick(struct task_struct *curr, int cpu)
1454{
Peter Zijlstra7fc23a52009-05-08 18:52:21 +02001455 struct perf_cpu_context *cpuctx;
1456 struct perf_counter_context *ctx;
1457
1458 if (!atomic_read(&nr_counters))
1459 return;
1460
1461 cpuctx = &per_cpu(perf_cpu_context, cpu);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001462 ctx = curr->perf_counter_ctxp;
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001463
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001464 perf_ctx_adjust_freq(&cpuctx->ctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001465 if (ctx)
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001466 perf_ctx_adjust_freq(ctx);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001467
Ingo Molnarb82914c2009-05-04 18:54:32 +02001468 perf_counter_cpu_sched_out(cpuctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001469 if (ctx)
1470 __perf_counter_task_sched_out(ctx);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001471
Ingo Molnarb82914c2009-05-04 18:54:32 +02001472 rotate_ctx(&cpuctx->ctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001473 if (ctx)
1474 rotate_ctx(ctx);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001475
Ingo Molnarb82914c2009-05-04 18:54:32 +02001476 perf_counter_cpu_sched_in(cpuctx, cpu);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001477 if (ctx)
1478 perf_counter_task_sched_in(curr, cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001479}
1480
1481/*
Paul Mackerras57e79862009-06-30 16:07:19 +10001482 * Enable all of a task's counters that have been marked enable-on-exec.
1483 * This expects task == current.
1484 */
1485static void perf_counter_enable_on_exec(struct task_struct *task)
1486{
1487 struct perf_counter_context *ctx;
1488 struct perf_counter *counter;
1489 unsigned long flags;
1490 int enabled = 0;
1491
1492 local_irq_save(flags);
1493 ctx = task->perf_counter_ctxp;
1494 if (!ctx || !ctx->nr_counters)
1495 goto out;
1496
1497 __perf_counter_task_sched_out(ctx);
1498
1499 spin_lock(&ctx->lock);
1500
1501 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1502 if (!counter->attr.enable_on_exec)
1503 continue;
1504 counter->attr.enable_on_exec = 0;
1505 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
1506 continue;
Paul Mackerrasfa289be2009-08-25 15:17:20 +10001507 __perf_counter_mark_enabled(counter, ctx);
Paul Mackerras57e79862009-06-30 16:07:19 +10001508 enabled = 1;
1509 }
1510
1511 /*
1512 * Unclone this context if we enabled any counter.
1513 */
Peter Zijlstra71a851b2009-07-10 09:06:56 +02001514 if (enabled)
1515 unclone_ctx(ctx);
Paul Mackerras57e79862009-06-30 16:07:19 +10001516
1517 spin_unlock(&ctx->lock);
1518
1519 perf_counter_task_sched_in(task, smp_processor_id());
1520 out:
1521 local_irq_restore(flags);
1522}
1523
1524/*
Thomas Gleixner0793a612008-12-04 20:12:29 +01001525 * Cross CPU call to read the hardware counter
1526 */
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001527static void __perf_counter_read(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001528{
Paul Mackerrase1ac3612009-08-14 15:39:10 +10001529 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001530 struct perf_counter *counter = info;
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001531 struct perf_counter_context *ctx = counter->ctx;
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001532 unsigned long flags;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001533
Paul Mackerrase1ac3612009-08-14 15:39:10 +10001534 /*
1535 * If this is a task context, we need to check whether it is
1536 * the current task context of this cpu. If not it has been
1537 * the current task context of this cpu. If not, it has been
1538 * counter->count would have been updated to a recent sample
1539 * when the counter was scheduled out.
1540 */
1541 if (ctx->task && cpuctx->task_ctx != ctx)
1542 return;
1543
Peter Zijlstra849691a2009-04-06 11:45:12 +02001544 local_irq_save(flags);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001545 if (ctx->is_active)
Peter Zijlstra4af49982009-04-06 11:45:10 +02001546 update_context_time(ctx);
Robert Richter4aeb0b42009-04-29 12:47:03 +02001547 counter->pmu->read(counter);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001548 update_counter_times(counter);
Peter Zijlstra849691a2009-04-06 11:45:12 +02001549 local_irq_restore(flags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001550}
1551
Ingo Molnar04289bb2008-12-11 08:38:42 +01001552static u64 perf_counter_read(struct perf_counter *counter)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001553{
1554 /*
1555 * If counter is enabled and currently active on a CPU, update the
1556 * value in the counter structure:
1557 */
Ingo Molnar6a930702008-12-11 15:17:03 +01001558 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
Thomas Gleixner0793a612008-12-04 20:12:29 +01001559 smp_call_function_single(counter->oncpu,
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001560 __perf_counter_read, counter, 1);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001561 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1562 update_counter_times(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001563 }
1564
Ingo Molnaree060942008-12-13 09:00:03 +01001565 return atomic64_read(&counter->count);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001566}
1567
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001568/*
1569 * Initialize the perf_counter context in a task_struct:
1570 */
1571static void
1572__perf_counter_init_context(struct perf_counter_context *ctx,
1573 struct task_struct *task)
1574{
1575 memset(ctx, 0, sizeof(*ctx));
1576 spin_lock_init(&ctx->lock);
1577 mutex_init(&ctx->mutex);
1578 INIT_LIST_HEAD(&ctx->counter_list);
1579 INIT_LIST_HEAD(&ctx->event_list);
1580 atomic_set(&ctx->refcount, 1);
1581 ctx->task = task;
1582}
1583
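/*
 * Find (or allocate) and pin the counter context for the given pid/cpu
 * pair.  cpu != -1 selects the per-CPU context, which needs CAP_SYS_ADMIN
 * unless the paranoia setting allows unprivileged CPU counters; otherwise
 * the task's context is looked up under RCU, uncloned and its refcount
 * taken.  Returns an ERR_PTR() on failure.
 */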
Thomas Gleixner0793a612008-12-04 20:12:29 +01001584static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1585{
Ingo Molnar22a4f652009-06-01 10:13:37 +02001586 struct perf_counter_context *ctx;
1587 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001588 struct task_struct *task;
Paul Mackerras25346b932009-06-01 17:48:12 +10001589 unsigned long flags;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001590 int err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001591
1592 /*
1593 * If cpu is not a wildcard then this is a percpu counter:
1594 */
1595 if (cpu != -1) {
1596 /* Must be root to operate on a CPU counter: */
Peter Zijlstra07647712009-06-11 11:18:36 +02001597 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
Thomas Gleixner0793a612008-12-04 20:12:29 +01001598 return ERR_PTR(-EACCES);
1599
1600 if (cpu < 0 || cpu > num_possible_cpus())
1601 return ERR_PTR(-EINVAL);
1602
1603 /*
1604 * We could be clever and allow attaching a counter to an
1605 * offline CPU and activate it when the CPU comes up, but
1606 * that's for later.
1607 */
1608 if (!cpu_isset(cpu, cpu_online_map))
1609 return ERR_PTR(-ENODEV);
1610
1611 cpuctx = &per_cpu(perf_cpu_context, cpu);
1612 ctx = &cpuctx->ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001613 get_ctx(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001614
Thomas Gleixner0793a612008-12-04 20:12:29 +01001615 return ctx;
1616 }
1617
1618 rcu_read_lock();
1619 if (!pid)
1620 task = current;
1621 else
1622 task = find_task_by_vpid(pid);
1623 if (task)
1624 get_task_struct(task);
1625 rcu_read_unlock();
1626
1627 if (!task)
1628 return ERR_PTR(-ESRCH);
1629
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001630 /*
1631 * Can't attach counters to a dying task.
1632 */
1633 err = -ESRCH;
1634 if (task->flags & PF_EXITING)
1635 goto errout;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001636
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001637 /* Reuse ptrace permission checks for now. */
1638 err = -EACCES;
1639 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1640 goto errout;
1641
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001642 retry:
Paul Mackerras25346b932009-06-01 17:48:12 +10001643 ctx = perf_lock_task_context(task, &flags);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001644 if (ctx) {
Peter Zijlstra71a851b2009-07-10 09:06:56 +02001645 unclone_ctx(ctx);
Paul Mackerras25346b932009-06-01 17:48:12 +10001646 spin_unlock_irqrestore(&ctx->lock, flags);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001647 }
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001648
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001649 if (!ctx) {
1650 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001651 err = -ENOMEM;
1652 if (!ctx)
1653 goto errout;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001654 __perf_counter_init_context(ctx, task);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001655 get_ctx(ctx);
1656 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001657 /*
1658 * We raced with some other task; use
1659 * the context they set.
1660 */
1661 kfree(ctx);
Paul Mackerras25346b932009-06-01 17:48:12 +10001662 goto retry;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001663 }
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001664 get_task_struct(task);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001665 }
1666
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001667 put_task_struct(task);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001668 return ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001669
1670 errout:
1671 put_task_struct(task);
1672 return ERR_PTR(err);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001673}
1674
Peter Zijlstra592903c2009-03-13 12:21:36 +01001675static void free_counter_rcu(struct rcu_head *head)
1676{
1677 struct perf_counter *counter;
1678
1679 counter = container_of(head, struct perf_counter, rcu_head);
Peter Zijlstra709e50c2009-06-02 14:13:15 +02001680 if (counter->ns)
1681 put_pid_ns(counter->ns);
Peter Zijlstra592903c2009-03-13 12:21:36 +01001682 kfree(counter);
1683}
1684
Peter Zijlstra925d5192009-03-30 19:07:02 +02001685static void perf_pending_sync(struct perf_counter *counter);
1686
Peter Zijlstraf1600952009-03-19 20:26:16 +01001687static void free_counter(struct perf_counter *counter)
1688{
Peter Zijlstra925d5192009-03-30 19:07:02 +02001689 perf_pending_sync(counter);
1690
Peter Zijlstraf3440112009-06-22 13:58:35 +02001691 if (!counter->parent) {
1692 atomic_dec(&nr_counters);
1693 if (counter->attr.mmap)
1694 atomic_dec(&nr_mmap_counters);
1695 if (counter->attr.comm)
1696 atomic_dec(&nr_comm_counters);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02001697 if (counter->attr.task)
1698 atomic_dec(&nr_task_counters);
Peter Zijlstraf3440112009-06-22 13:58:35 +02001699 }
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02001700
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02001701 if (counter->output) {
1702 fput(counter->output->filp);
1703 counter->output = NULL;
1704 }
1705
Peter Zijlstrae077df42009-03-19 20:26:17 +01001706 if (counter->destroy)
1707 counter->destroy(counter);
1708
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001709 put_ctx(counter->ctx);
Peter Zijlstraf1600952009-03-19 20:26:16 +01001710 call_rcu(&counter->rcu_head, free_counter_rcu);
1711}
1712
Thomas Gleixner0793a612008-12-04 20:12:29 +01001713/*
1714 * Called when the last reference to the file is gone.
1715 */
1716static int perf_release(struct inode *inode, struct file *file)
1717{
1718 struct perf_counter *counter = file->private_data;
1719 struct perf_counter_context *ctx = counter->ctx;
1720
1721 file->private_data = NULL;
1722
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001723 WARN_ON_ONCE(ctx->parent_ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001724 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001725 perf_counter_remove_from_context(counter);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001726 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001727
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02001728 mutex_lock(&counter->owner->perf_counter_mutex);
1729 list_del_init(&counter->owner_entry);
1730 mutex_unlock(&counter->owner->perf_counter_mutex);
1731 put_task_struct(counter->owner);
1732
Peter Zijlstraf1600952009-03-19 20:26:16 +01001733 free_counter(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001734
1735 return 0;
1736}
1737
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001738static int perf_counter_read_size(struct perf_counter *counter)
1739{
1740 int entry = sizeof(u64); /* value */
1741 int size = 0;
1742 int nr = 1;
1743
1744 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1745 size += sizeof(u64);
1746
1747 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1748 size += sizeof(u64);
1749
1750 if (counter->attr.read_format & PERF_FORMAT_ID)
1751 entry += sizeof(u64);
1752
1753 if (counter->attr.read_format & PERF_FORMAT_GROUP) {
1754 nr += counter->group_leader->nr_siblings;
1755 size += sizeof(u64);
1756 }
1757
1758 size += entry * nr;
1759
1760 return size;
1761}
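/*
 * The resulting read() buffer layout (see perf_counter_read_one() and
 * perf_counter_read_group() below) is roughly, one u64 per field:
 *
 *   without PERF_FORMAT_GROUP:  value [,time_enabled] [,time_running] [,id]
 *   with    PERF_FORMAT_GROUP:  nr [,time_enabled] [,time_running]
 *                               { value [,id] } * nr
 */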
1762
1763static u64 perf_counter_read_value(struct perf_counter *counter)
Peter Zijlstrae53c0992009-07-24 14:42:10 +02001764{
1765 struct perf_counter *child;
1766 u64 total = 0;
1767
1768 total += perf_counter_read(counter);
1769 list_for_each_entry(child, &counter->child_list, child_list)
1770 total += perf_counter_read(child);
1771
1772 return total;
1773}
1774
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001775static int perf_counter_read_entry(struct perf_counter *counter,
1776 u64 read_format, char __user *buf)
1777{
1778 int n = 0, count = 0;
1779 u64 values[2];
1780
1781 values[n++] = perf_counter_read_value(counter);
1782 if (read_format & PERF_FORMAT_ID)
1783 values[n++] = primary_counter_id(counter);
1784
1785 count = n * sizeof(u64);
1786
1787 if (copy_to_user(buf, values, count))
1788 return -EFAULT;
1789
1790 return count;
1791}
1792
1793static int perf_counter_read_group(struct perf_counter *counter,
1794 u64 read_format, char __user *buf)
1795{
1796 struct perf_counter *leader = counter->group_leader, *sub;
1797 int n = 0, size = 0, err = -EFAULT;
1798 u64 values[3];
1799
1800 values[n++] = 1 + leader->nr_siblings;
1801 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1802 values[n++] = leader->total_time_enabled +
1803 atomic64_read(&leader->child_total_time_enabled);
1804 }
1805 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1806 values[n++] = leader->total_time_running +
1807 atomic64_read(&leader->child_total_time_running);
1808 }
1809
1810 size = n * sizeof(u64);
1811
1812 if (copy_to_user(buf, values, size))
1813 return -EFAULT;
1814
1815 err = perf_counter_read_entry(leader, read_format, buf + size);
1816 if (err < 0)
1817 return err;
1818
1819 size += err;
1820
1821 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
Peter Zijlstra4464fca2009-08-21 17:19:36 +02001822 err = perf_counter_read_entry(sub, read_format,
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001823 buf + size);
1824 if (err < 0)
1825 return err;
1826
1827 size += err;
1828 }
1829
1830 return size;
1831}
1832
1833static int perf_counter_read_one(struct perf_counter *counter,
1834 u64 read_format, char __user *buf)
1835{
1836 u64 values[4];
1837 int n = 0;
1838
1839 values[n++] = perf_counter_read_value(counter);
1840 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1841 values[n++] = counter->total_time_enabled +
1842 atomic64_read(&counter->child_total_time_enabled);
1843 }
1844 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1845 values[n++] = counter->total_time_running +
1846 atomic64_read(&counter->child_total_time_running);
1847 }
1848 if (read_format & PERF_FORMAT_ID)
1849 values[n++] = primary_counter_id(counter);
1850
1851 if (copy_to_user(buf, values, n * sizeof(u64)))
1852 return -EFAULT;
1853
1854 return n * sizeof(u64);
1855}
1856
Thomas Gleixner0793a612008-12-04 20:12:29 +01001857/*
1858 * Read the performance counter - simple non-blocking version for now
1859 */
1860static ssize_t
1861perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1862{
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001863 u64 read_format = counter->attr.read_format;
1864 int ret;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001865
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11001866 /*
1867 * Return end-of-file for a read on a counter that is in
1868 * error state (i.e. because it was pinned but it couldn't be
1869 * scheduled on to the CPU at some point).
1870 */
1871 if (counter->state == PERF_COUNTER_STATE_ERROR)
1872 return 0;
1873
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001874 if (count < perf_counter_read_size(counter))
1875 return -ENOSPC;
1876
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001877 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001878 mutex_lock(&counter->child_mutex);
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001879 if (read_format & PERF_FORMAT_GROUP)
1880 ret = perf_counter_read_group(counter, read_format, buf);
1881 else
1882 ret = perf_counter_read_one(counter, read_format, buf);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001883 mutex_unlock(&counter->child_mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001884
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001885 return ret;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001886}
1887
1888static ssize_t
Thomas Gleixner0793a612008-12-04 20:12:29 +01001889perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1890{
1891 struct perf_counter *counter = file->private_data;
1892
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001893 return perf_read_hw(counter, buf, count);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001894}
1895
1896static unsigned int perf_poll(struct file *file, poll_table *wait)
1897{
1898 struct perf_counter *counter = file->private_data;
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001899 struct perf_mmap_data *data;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001900	unsigned int events = POLLHUP;
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001901
1902 rcu_read_lock();
1903 data = rcu_dereference(counter->data);
1904 if (data)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001905 events = atomic_xchg(&data->poll, 0);
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001906 rcu_read_unlock();
Thomas Gleixner0793a612008-12-04 20:12:29 +01001907
1908 poll_wait(file, &counter->waitq, wait);
1909
Thomas Gleixner0793a612008-12-04 20:12:29 +01001910 return events;
1911}
1912
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001913static void perf_counter_reset(struct perf_counter *counter)
1914{
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001915 (void)perf_counter_read(counter);
Paul Mackerras615a3f12009-05-11 15:50:21 +10001916 atomic64_set(&counter->count, 0);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001917 perf_counter_update_userpage(counter);
1918}
1919
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001920/*
1921 * Holding the top-level counter's child_mutex means that any
1922 * descendant process that has inherited this counter will block
1923 * in sync_child_counter if it goes to exit, thus satisfying the
1924 * task existence requirements of perf_counter_enable/disable.
1925 */
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001926static void perf_counter_for_each_child(struct perf_counter *counter,
1927 void (*func)(struct perf_counter *))
1928{
1929 struct perf_counter *child;
1930
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001931 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001932 mutex_lock(&counter->child_mutex);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001933 func(counter);
1934 list_for_each_entry(child, &counter->child_list, child_list)
1935 func(child);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001936 mutex_unlock(&counter->child_mutex);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001937}
1938
1939static void perf_counter_for_each(struct perf_counter *counter,
1940 void (*func)(struct perf_counter *))
1941{
Peter Zijlstra75f937f2009-06-15 15:05:12 +02001942 struct perf_counter_context *ctx = counter->ctx;
1943 struct perf_counter *sibling;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001944
Peter Zijlstra75f937f2009-06-15 15:05:12 +02001945 WARN_ON_ONCE(ctx->parent_ctx);
1946 mutex_lock(&ctx->mutex);
1947 counter = counter->group_leader;
1948
1949 perf_counter_for_each_child(counter, func);
1950 func(counter);
1951 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1952 perf_counter_for_each_child(counter, func);
1953 mutex_unlock(&ctx->mutex);
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001954}
1955
Peter Zijlstra08247e32009-06-02 16:46:57 +02001956static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1957{
1958 struct perf_counter_context *ctx = counter->ctx;
1959 unsigned long size;
1960 int ret = 0;
1961 u64 value;
1962
Peter Zijlstra0d486962009-06-02 19:22:16 +02001963 if (!counter->attr.sample_period)
Peter Zijlstra08247e32009-06-02 16:46:57 +02001964 return -EINVAL;
1965
1966 size = copy_from_user(&value, arg, sizeof(value));
1967 if (size != sizeof(value))
1968 return -EFAULT;
1969
1970 if (!value)
1971 return -EINVAL;
1972
1973 spin_lock_irq(&ctx->lock);
Peter Zijlstra0d486962009-06-02 19:22:16 +02001974 if (counter->attr.freq) {
Peter Zijlstradf58ab22009-06-11 11:25:05 +02001975 if (value > sysctl_perf_counter_sample_rate) {
Peter Zijlstra08247e32009-06-02 16:46:57 +02001976 ret = -EINVAL;
1977 goto unlock;
1978 }
1979
Peter Zijlstra0d486962009-06-02 19:22:16 +02001980 counter->attr.sample_freq = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001981 } else {
Peter Zijlstra0d486962009-06-02 19:22:16 +02001982 counter->attr.sample_period = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001983 counter->hw.sample_period = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001984 }
1985unlock:
1986 spin_unlock_irq(&ctx->lock);
1987
1988 return ret;
1989}
1990
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02001991int perf_counter_set_output(struct perf_counter *counter, int output_fd);
1992
Paul Mackerrasd859e292009-01-17 18:10:22 +11001993static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1994{
1995 struct perf_counter *counter = file->private_data;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001996 void (*func)(struct perf_counter *);
1997 u32 flags = arg;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001998
1999 switch (cmd) {
2000 case PERF_COUNTER_IOC_ENABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002001 func = perf_counter_enable;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002002 break;
2003 case PERF_COUNTER_IOC_DISABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002004 func = perf_counter_disable;
Peter Zijlstra79f14642009-04-06 11:45:07 +02002005 break;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02002006 case PERF_COUNTER_IOC_RESET:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002007 func = perf_counter_reset;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02002008 break;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002009
2010 case PERF_COUNTER_IOC_REFRESH:
2011 return perf_counter_refresh(counter, arg);
Peter Zijlstra08247e32009-06-02 16:46:57 +02002012
2013 case PERF_COUNTER_IOC_PERIOD:
2014 return perf_counter_period(counter, (u64 __user *)arg);
2015
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002016 case PERF_COUNTER_IOC_SET_OUTPUT:
2017 return perf_counter_set_output(counter, arg);
2018
Paul Mackerrasd859e292009-01-17 18:10:22 +11002019 default:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002020 return -ENOTTY;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002021 }
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002022
2023 if (flags & PERF_IOC_FLAG_GROUP)
2024 perf_counter_for_each(counter, func);
2025 else
2026 perf_counter_for_each_child(counter, func);
2027
2028 return 0;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002029}
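/*
 * Illustrative user-space use of the ioctls above (error handling
 * omitted): switch an existing counter fd to a new sample period and
 * re-enable its whole group:
 *
 *	u64 period = 100000;
 *
 *	ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */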
2030
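/*
 * Enable/disable every counter owned by the current task; these are the
 * backends for the PR_TASK_PERF_COUNTERS_ENABLE/_DISABLE prctl() calls.
 */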
Peter Zijlstra771d7cd2009-05-25 14:45:26 +02002031int perf_counter_task_enable(void)
2032{
2033 struct perf_counter *counter;
2034
2035 mutex_lock(&current->perf_counter_mutex);
2036 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
2037 perf_counter_for_each_child(counter, perf_counter_enable);
2038 mutex_unlock(&current->perf_counter_mutex);
2039
2040 return 0;
2041}
2042
2043int perf_counter_task_disable(void)
2044{
2045 struct perf_counter *counter;
2046
2047 mutex_lock(&current->perf_counter_mutex);
2048 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
2049 perf_counter_for_each_child(counter, perf_counter_disable);
2050 mutex_unlock(&current->perf_counter_mutex);
2051
2052 return 0;
2053}
2054
Ingo Molnarf738eb12009-08-18 11:32:24 +02002055#ifndef PERF_COUNTER_INDEX_OFFSET
2056# define PERF_COUNTER_INDEX_OFFSET 0
2057#endif
2058
Peter Zijlstra194002b2009-06-22 16:35:24 +02002059static int perf_counter_index(struct perf_counter *counter)
2060{
2061 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2062 return 0;
2063
2064 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
2065}
2066
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002067/*
2068 * Callers need to ensure there can be no nesting of this function, otherwise
2069 * the seqlock logic goes bad. We can not serialize this because the arch
2070 * code calls this from NMI context.
2071 */
2072void perf_counter_update_userpage(struct perf_counter *counter)
Paul Mackerras37d81822009-03-23 18:22:08 +01002073{
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002074 struct perf_counter_mmap_page *userpg;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002075 struct perf_mmap_data *data;
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002076
2077 rcu_read_lock();
2078 data = rcu_dereference(counter->data);
2079 if (!data)
2080 goto unlock;
2081
2082 userpg = data->user_page;
Paul Mackerras37d81822009-03-23 18:22:08 +01002083
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002084 /*
2085 * Disable preemption so as to not let the corresponding user-space
2086 * spin too long if we get preempted.
2087 */
2088 preempt_disable();
Paul Mackerras37d81822009-03-23 18:22:08 +01002089 ++userpg->lock;
Peter Zijlstra92f22a32009-04-02 11:12:04 +02002090 barrier();
Peter Zijlstra194002b2009-06-22 16:35:24 +02002091 userpg->index = perf_counter_index(counter);
Paul Mackerras37d81822009-03-23 18:22:08 +01002092 userpg->offset = atomic64_read(&counter->count);
2093 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
2094 userpg->offset -= atomic64_read(&counter->hw.prev_count);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002095
Peter Zijlstra7f8b4e42009-06-22 14:34:35 +02002096 userpg->time_enabled = counter->total_time_enabled +
2097 atomic64_read(&counter->child_total_time_enabled);
2098
2099 userpg->time_running = counter->total_time_running +
2100 atomic64_read(&counter->child_total_time_running);
2101
Peter Zijlstra92f22a32009-04-02 11:12:04 +02002102 barrier();
Paul Mackerras37d81822009-03-23 18:22:08 +01002103 ++userpg->lock;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002104 preempt_enable();
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002105unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002106 rcu_read_unlock();
Paul Mackerras37d81822009-03-23 18:22:08 +01002107}
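/*
 * A user-space reader of the mmap()ed control page is expected to pair
 * with the lock sequence above roughly like this (sketch only, pc being
 * the mapped struct perf_counter_mmap_page):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx    = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 */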
2108
2109static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2110{
2111 struct perf_counter *counter = vma->vm_file->private_data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002112 struct perf_mmap_data *data;
2113 int ret = VM_FAULT_SIGBUS;
Paul Mackerras37d81822009-03-23 18:22:08 +01002114
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002115 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2116 if (vmf->pgoff == 0)
2117 ret = 0;
2118 return ret;
2119 }
2120
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002121 rcu_read_lock();
2122 data = rcu_dereference(counter->data);
2123 if (!data)
2124 goto unlock;
Paul Mackerras37d81822009-03-23 18:22:08 +01002125
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002126 if (vmf->pgoff == 0) {
2127 vmf->page = virt_to_page(data->user_page);
2128 } else {
2129 int nr = vmf->pgoff - 1;
2130
2131		if ((unsigned)nr >= data->nr_pages)
2132 goto unlock;
2133
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002134 if (vmf->flags & FAULT_FLAG_WRITE)
2135 goto unlock;
2136
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002137 vmf->page = virt_to_page(data->data_pages[nr]);
2138 }
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002139
Paul Mackerras37d81822009-03-23 18:22:08 +01002140 get_page(vmf->page);
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002141 vmf->page->mapping = vma->vm_file->f_mapping;
2142 vmf->page->index = vmf->pgoff;
2143
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002144 ret = 0;
2145unlock:
2146 rcu_read_unlock();
2147
2148 return ret;
2149}
2150
2151static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2152{
2153 struct perf_mmap_data *data;
2154 unsigned long size;
2155 int i;
2156
2157 WARN_ON(atomic_read(&counter->mmap_count));
2158
2159 size = sizeof(struct perf_mmap_data);
2160 size += nr_pages * sizeof(void *);
2161
2162 data = kzalloc(size, GFP_KERNEL);
2163 if (!data)
2164 goto fail;
2165
2166 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2167 if (!data->user_page)
2168 goto fail_user_page;
2169
2170 for (i = 0; i < nr_pages; i++) {
2171 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2172 if (!data->data_pages[i])
2173 goto fail_data_pages;
2174 }
2175
2176 data->nr_pages = nr_pages;
Peter Zijlstra22c15582009-05-05 17:50:25 +02002177 atomic_set(&data->lock, -1);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002178
Peter Zijlstra2667de82009-09-17 19:01:10 +02002179 if (counter->attr.watermark) {
2180 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2181 counter->attr.wakeup_watermark);
2182 }
2183 if (!data->watermark)
2184 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2185
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002186 rcu_assign_pointer(counter->data, data);
2187
Paul Mackerras37d81822009-03-23 18:22:08 +01002188 return 0;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002189
2190fail_data_pages:
2191 for (i--; i >= 0; i--)
2192 free_page((unsigned long)data->data_pages[i]);
2193
2194 free_page((unsigned long)data->user_page);
2195
2196fail_user_page:
2197 kfree(data);
2198
2199fail:
2200 return -ENOMEM;
2201}
2202
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002203static void perf_mmap_free_page(unsigned long addr)
2204{
Kevin Cernekee5bfd7562009-07-05 12:08:19 -07002205 struct page *page = virt_to_page((void *)addr);
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002206
2207 page->mapping = NULL;
2208 __free_page(page);
2209}
2210
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002211static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2212{
Ingo Molnar22a4f652009-06-01 10:13:37 +02002213 struct perf_mmap_data *data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002214 int i;
2215
Ingo Molnar22a4f652009-06-01 10:13:37 +02002216 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2217
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002218 perf_mmap_free_page((unsigned long)data->user_page);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002219 for (i = 0; i < data->nr_pages; i++)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002220 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2221
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002222 kfree(data);
2223}
2224
2225static void perf_mmap_data_free(struct perf_counter *counter)
2226{
2227 struct perf_mmap_data *data = counter->data;
2228
2229 WARN_ON(atomic_read(&counter->mmap_count));
2230
2231 rcu_assign_pointer(counter->data, NULL);
2232 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2233}
2234
2235static void perf_mmap_open(struct vm_area_struct *vma)
2236{
2237 struct perf_counter *counter = vma->vm_file->private_data;
2238
2239 atomic_inc(&counter->mmap_count);
2240}
2241
2242static void perf_mmap_close(struct vm_area_struct *vma)
2243{
2244 struct perf_counter *counter = vma->vm_file->private_data;
2245
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10002246 WARN_ON_ONCE(counter->ctx->parent_ctx);
Ingo Molnar22a4f652009-06-01 10:13:37 +02002247 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002248 struct user_struct *user = current_user();
2249
2250 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002251 vma->vm_mm->locked_vm -= counter->data->nr_locked;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002252 perf_mmap_data_free(counter);
2253 mutex_unlock(&counter->mmap_mutex);
2254 }
Paul Mackerras37d81822009-03-23 18:22:08 +01002255}
2256
2257static struct vm_operations_struct perf_mmap_vmops = {
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002258 .open = perf_mmap_open,
2259 .close = perf_mmap_close,
2260 .fault = perf_mmap_fault,
2261 .page_mkwrite = perf_mmap_fault,
Paul Mackerras37d81822009-03-23 18:22:08 +01002262};
2263
2264static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2265{
2266 struct perf_counter *counter = file->private_data;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002267 unsigned long user_locked, user_lock_limit;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002268 struct user_struct *user = current_user();
Ingo Molnar22a4f652009-06-01 10:13:37 +02002269 unsigned long locked, lock_limit;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002270 unsigned long vma_size;
2271 unsigned long nr_pages;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002272 long user_extra, extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002273 int ret = 0;
Paul Mackerras37d81822009-03-23 18:22:08 +01002274
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002275 if (!(vma->vm_flags & VM_SHARED))
Paul Mackerras37d81822009-03-23 18:22:08 +01002276 return -EINVAL;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002277
2278 vma_size = vma->vm_end - vma->vm_start;
2279 nr_pages = (vma_size / PAGE_SIZE) - 1;
2280
Peter Zijlstra7730d862009-03-25 12:48:31 +01002281 /*
2282 * If we have data pages ensure they're a power-of-two number, so we
2283 * can do bitmasks instead of modulo.
2284 */
2285 if (nr_pages != 0 && !is_power_of_2(nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01002286 return -EINVAL;
2287
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002288 if (vma_size != PAGE_SIZE * (1 + nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01002289 return -EINVAL;
2290
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002291 if (vma->vm_pgoff != 0)
2292 return -EINVAL;
Paul Mackerras37d81822009-03-23 18:22:08 +01002293
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10002294 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002295 mutex_lock(&counter->mmap_mutex);
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002296 if (counter->output) {
2297 ret = -EINVAL;
2298 goto unlock;
2299 }
2300
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002301 if (atomic_inc_not_zero(&counter->mmap_count)) {
2302 if (nr_pages != counter->data->nr_pages)
2303 ret = -EINVAL;
2304 goto unlock;
2305 }
2306
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002307 user_extra = nr_pages + 1;
2308 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
Ingo Molnara3862d32009-05-24 09:02:37 +02002309
2310 /*
2311 * Increase the limit linearly with more CPUs:
2312 */
2313 user_lock_limit *= num_online_cpus();
2314
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002315 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002316
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002317 extra = 0;
2318 if (user_locked > user_lock_limit)
2319 extra = user_locked - user_lock_limit;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002320
2321 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2322 lock_limit >>= PAGE_SHIFT;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002323 locked = vma->vm_mm->locked_vm + extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002324
Ingo Molnar459ec282009-09-13 17:33:44 +02002325 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2326 !capable(CAP_IPC_LOCK)) {
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002327 ret = -EPERM;
2328 goto unlock;
2329 }
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002330
2331 WARN_ON(counter->data);
2332 ret = perf_mmap_data_alloc(counter, nr_pages);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002333 if (ret)
2334 goto unlock;
2335
2336 atomic_set(&counter->mmap_count, 1);
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002337 atomic_long_add(user_extra, &user->locked_vm);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002338 vma->vm_mm->locked_vm += extra;
2339 counter->data->nr_locked = extra;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002340 if (vma->vm_flags & VM_WRITE)
2341 counter->data->writable = 1;
2342
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002343unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002344 mutex_unlock(&counter->mmap_mutex);
Paul Mackerras37d81822009-03-23 18:22:08 +01002345
Paul Mackerras37d81822009-03-23 18:22:08 +01002346 vma->vm_flags |= VM_RESERVED;
2347 vma->vm_ops = &perf_mmap_vmops;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002348
2349 return ret;
Paul Mackerras37d81822009-03-23 18:22:08 +01002350}
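/*
 * Illustrative user-space counterpart (error handling omitted): the
 * mapping must be MAP_SHARED, start at offset 0 and cover the control
 * page plus a power-of-two number of data pages, e.g. 1 + 8 pages:
 *
 *	len  = (1 + 8) * page_size;
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */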
2351
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002352static int perf_fasync(int fd, struct file *filp, int on)
2353{
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002354 struct inode *inode = filp->f_path.dentry->d_inode;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002355 struct perf_counter *counter = filp->private_data;
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002356 int retval;
2357
2358 mutex_lock(&inode->i_mutex);
2359 retval = fasync_helper(fd, filp, on, &counter->fasync);
2360 mutex_unlock(&inode->i_mutex);
2361
2362 if (retval < 0)
2363 return retval;
2364
2365 return 0;
2366}
2367
Thomas Gleixner0793a612008-12-04 20:12:29 +01002368static const struct file_operations perf_fops = {
2369 .release = perf_release,
2370 .read = perf_read,
2371 .poll = perf_poll,
Paul Mackerrasd859e292009-01-17 18:10:22 +11002372 .unlocked_ioctl = perf_ioctl,
2373 .compat_ioctl = perf_ioctl,
Paul Mackerras37d81822009-03-23 18:22:08 +01002374 .mmap = perf_mmap,
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002375 .fasync = perf_fasync,
Thomas Gleixner0793a612008-12-04 20:12:29 +01002376};
2377
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002378/*
Peter Zijlstra925d5192009-03-30 19:07:02 +02002379 * Perf counter wakeup
2380 *
2381 * If there's data, ensure we set the poll() state and publish everything
2382 * to user-space before waking everybody up.
2383 */
2384
2385void perf_counter_wakeup(struct perf_counter *counter)
2386{
Peter Zijlstra925d5192009-03-30 19:07:02 +02002387 wake_up_all(&counter->waitq);
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002388
2389 if (counter->pending_kill) {
2390 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2391 counter->pending_kill = 0;
2392 }
Peter Zijlstra925d5192009-03-30 19:07:02 +02002393}
2394
2395/*
2396 * Pending wakeups
2397 *
2398 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2399 *
2400 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2401 * singly-linked list and use cmpxchg() to add entries locklessly.
2402 */
2403
Peter Zijlstra79f14642009-04-06 11:45:07 +02002404static void perf_pending_counter(struct perf_pending_entry *entry)
2405{
2406 struct perf_counter *counter = container_of(entry,
2407 struct perf_counter, pending);
2408
2409 if (counter->pending_disable) {
2410 counter->pending_disable = 0;
Peter Zijlstra970892a2009-08-13 11:47:54 +02002411 __perf_counter_disable(counter);
Peter Zijlstra79f14642009-04-06 11:45:07 +02002412 }
2413
2414 if (counter->pending_wakeup) {
2415 counter->pending_wakeup = 0;
2416 perf_counter_wakeup(counter);
2417 }
2418}
2419
Peter Zijlstra671dec52009-04-06 11:45:02 +02002420#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02002421
Peter Zijlstra671dec52009-04-06 11:45:02 +02002422static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
Peter Zijlstra925d5192009-03-30 19:07:02 +02002423 PENDING_TAIL,
2424};
2425
Peter Zijlstra671dec52009-04-06 11:45:02 +02002426static void perf_pending_queue(struct perf_pending_entry *entry,
2427 void (*func)(struct perf_pending_entry *))
Peter Zijlstra925d5192009-03-30 19:07:02 +02002428{
Peter Zijlstra671dec52009-04-06 11:45:02 +02002429 struct perf_pending_entry **head;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002430
Peter Zijlstra671dec52009-04-06 11:45:02 +02002431 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02002432 return;
2433
Peter Zijlstra671dec52009-04-06 11:45:02 +02002434 entry->func = func;
2435
2436 head = &get_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002437
2438 do {
Peter Zijlstra671dec52009-04-06 11:45:02 +02002439 entry->next = *head;
2440 } while (cmpxchg(head, entry->next, entry) != entry->next);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002441
2442 set_perf_counter_pending();
2443
Peter Zijlstra671dec52009-04-06 11:45:02 +02002444 put_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002445}
2446
2447static int __perf_pending_run(void)
2448{
Peter Zijlstra671dec52009-04-06 11:45:02 +02002449 struct perf_pending_entry *list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002450 int nr = 0;
2451
Peter Zijlstra671dec52009-04-06 11:45:02 +02002452 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002453 while (list != PENDING_TAIL) {
Peter Zijlstra671dec52009-04-06 11:45:02 +02002454 void (*func)(struct perf_pending_entry *);
2455 struct perf_pending_entry *entry = list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002456
2457 list = list->next;
2458
Peter Zijlstra671dec52009-04-06 11:45:02 +02002459 func = entry->func;
2460 entry->next = NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002461 /*
2462 * Ensure we observe the unqueue before we issue the wakeup,
2463 * so that we won't be waiting forever.
2464 * -- see perf_not_pending().
2465 */
2466 smp_wmb();
2467
Peter Zijlstra671dec52009-04-06 11:45:02 +02002468 func(entry);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002469 nr++;
2470 }
2471
2472 return nr;
2473}
2474
2475static inline int perf_not_pending(struct perf_counter *counter)
2476{
2477 /*
2478 * If we flush on whatever cpu we run, there is a chance we don't
2479 * need to wait.
2480 */
2481 get_cpu();
2482 __perf_pending_run();
2483 put_cpu();
2484
2485 /*
2486 * Ensure we see the proper queue state before going to sleep
2487 * so that we do not miss the wakeup. -- see __perf_pending_run()
2488 */
2489 smp_rmb();
Peter Zijlstra671dec52009-04-06 11:45:02 +02002490 return counter->pending.next == NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002491}
2492
2493static void perf_pending_sync(struct perf_counter *counter)
2494{
2495 wait_event(counter->waitq, perf_not_pending(counter));
2496}
2497
2498void perf_counter_do_pending(void)
2499{
2500 __perf_pending_run();
2501}
2502
2503/*
Peter Zijlstra394ee072009-03-30 19:07:14 +02002504 * Callchain support -- arch specific
2505 */
2506
Peter Zijlstra9c03d882009-04-06 11:45:00 +02002507__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
Peter Zijlstra394ee072009-03-30 19:07:14 +02002508{
2509 return NULL;
2510}
2511
2512/*
Peter Zijlstra0322cd62009-03-19 20:26:19 +01002513 * Output
2514 */
Peter Zijlstra2667de82009-09-17 19:01:10 +02002515static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2516 unsigned long offset, unsigned long head)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002517{
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002518 unsigned long mask;
2519
2520 if (!data->writable)
2521 return true;
2522
2523 mask = (data->nr_pages << PAGE_SHIFT) - 1;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002524
2525 offset = (offset - tail) & mask;
2526 head = (head - tail) & mask;
2527
2528 if ((int)(head - offset) < 0)
2529 return false;
2530
2531 return true;
2532}
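/*
 * Example (illustrative, 4 data pages of 4K so mask = 0x3fff):
 * tail = 0x100, offset = 0x3f80, head = 0x4080.  Relative to tail the
 * masked positions become 0x3e80 and 0x3f80, head - offset = 0x100 >= 0,
 * so the 0x100 bytes being written still fit without overwriting data
 * userspace has not consumed yet.
 */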
2533
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002534static void perf_output_wakeup(struct perf_output_handle *handle)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002535{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002536 atomic_set(&handle->data->poll, POLL_IN);
2537
Peter Zijlstra671dec52009-04-06 11:45:02 +02002538 if (handle->nmi) {
Peter Zijlstra79f14642009-04-06 11:45:07 +02002539 handle->counter->pending_wakeup = 1;
Peter Zijlstra671dec52009-04-06 11:45:02 +02002540 perf_pending_queue(&handle->counter->pending,
Peter Zijlstra79f14642009-04-06 11:45:07 +02002541 perf_pending_counter);
Peter Zijlstra671dec52009-04-06 11:45:02 +02002542 } else
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002543 perf_counter_wakeup(handle->counter);
2544}
2545
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002546/*
2547 * Curious locking construct.
2548 *
2549 * We need to ensure a later event doesn't publish a head when a former
2550 * event isn't done writing. However since we need to deal with NMIs we
2551 * cannot fully serialize things.
2552 *
2553 * What we do is serialize between CPUs so we only have to deal with NMI
2554 * nesting on a single CPU.
2555 *
2556 * We only publish the head (and generate a wakeup) when the outer-most
2557 * event completes.
2558 */
2559static void perf_output_lock(struct perf_output_handle *handle)
2560{
2561 struct perf_mmap_data *data = handle->data;
2562 int cpu;
2563
2564 handle->locked = 0;
2565
2566 local_irq_save(handle->flags);
2567 cpu = smp_processor_id();
2568
2569 if (in_nmi() && atomic_read(&data->lock) == cpu)
2570 return;
2571
Peter Zijlstra22c15582009-05-05 17:50:25 +02002572 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002573 cpu_relax();
2574
2575 handle->locked = 1;
2576}
2577
2578static void perf_output_unlock(struct perf_output_handle *handle)
2579{
2580 struct perf_mmap_data *data = handle->data;
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002581 unsigned long head;
2582 int cpu;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002583
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002584 data->done_head = data->head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002585
2586 if (!handle->locked)
2587 goto out;
2588
2589again:
2590 /*
2591 * The xchg implies a full barrier that ensures all writes are done
2592 * before we publish the new head, matched by a rmb() in userspace when
2593 * reading this position.
2594 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002595 while ((head = atomic_long_xchg(&data->done_head, 0)))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002596 data->user_page->data_head = head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002597
2598 /*
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002599 * NMI can happen here, which means we can miss a done_head update.
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002600 */
2601
Peter Zijlstra22c15582009-05-05 17:50:25 +02002602 cpu = atomic_xchg(&data->lock, -1);
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002603 WARN_ON_ONCE(cpu != smp_processor_id());
2604
2605 /*
2606 * Therefore we have to validate we did not indeed do so.
2607 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002608 if (unlikely(atomic_long_read(&data->done_head))) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002609 /*
2610 * Since we had it locked, we can lock it again.
2611 */
Peter Zijlstra22c15582009-05-05 17:50:25 +02002612 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002613 cpu_relax();
2614
2615 goto again;
2616 }
2617
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002618 if (atomic_xchg(&data->wakeup, 0))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002619 perf_output_wakeup(handle);
2620out:
2621 local_irq_restore(handle->flags);
2622}
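/*
 * Scenario the done_head dance above guards against (illustrative):
 * CPU0 holds the lock and is writing event A when an NMI nests and
 * writes event B; the nested unlock only records done_head and bails
 * out since it is not the outer-most writer, and data_head is published
 * past both A and B only when the outer perf_output_unlock() runs, so
 * userspace never observes a head covering a half-written event.
 */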
2623
Markus Metzger5622f292009-09-15 13:00:23 +02002624void perf_output_copy(struct perf_output_handle *handle,
2625 const void *buf, unsigned int len)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002626{
2627 unsigned int pages_mask;
2628 unsigned int offset;
2629 unsigned int size;
2630 void **pages;
2631
2632 offset = handle->offset;
2633 pages_mask = handle->data->nr_pages - 1;
2634 pages = handle->data->data_pages;
2635
2636 do {
2637 unsigned int page_offset;
2638 int nr;
2639
2640 nr = (offset >> PAGE_SHIFT) & pages_mask;
2641 page_offset = offset & (PAGE_SIZE - 1);
2642 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2643
2644 memcpy(pages[nr] + page_offset, buf, size);
2645
2646 len -= size;
2647 buf += size;
2648 offset += size;
2649 } while (len);
2650
2651 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01002652
Peter Zijlstra53020fe2009-05-13 21:26:19 +02002653 /*
2654 * Check we didn't copy past our reservation window, taking the
2655 * possible unsigned int wrap into account.
2656 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002657 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002658}
2659
Markus Metzger5622f292009-09-15 13:00:23 +02002660int perf_output_begin(struct perf_output_handle *handle,
2661 struct perf_counter *counter, unsigned int size,
2662 int nmi, int sample)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002663{
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002664 struct perf_counter *output_counter;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002665 struct perf_mmap_data *data;
Peter Zijlstra2667de82009-09-17 19:01:10 +02002666 unsigned long tail, offset, head;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002667 int have_lost;
2668 struct {
2669 struct perf_event_header header;
2670 u64 id;
2671 u64 lost;
2672 } lost_event;
2673
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002674 rcu_read_lock();
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002675 /*
2676 * For inherited counters we send all the output towards the parent.
2677 */
2678 if (counter->parent)
2679 counter = counter->parent;
2680
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002681 output_counter = rcu_dereference(counter->output);
2682 if (output_counter)
2683 counter = output_counter;
2684
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002685 data = rcu_dereference(counter->data);
2686 if (!data)
2687 goto out;
2688
2689 handle->data = data;
2690 handle->counter = counter;
2691 handle->nmi = nmi;
2692 handle->sample = sample;
2693
2694 if (!data->nr_pages)
2695 goto fail;
2696
2697 have_lost = atomic_read(&data->lost);
2698 if (have_lost)
2699 size += sizeof(lost_event);
2700
2701 perf_output_lock(handle);
2702
2703 do {
Peter Zijlstra2667de82009-09-17 19:01:10 +02002704 /*
2705 * Userspace could choose to issue a mb() before updating the
2706 * tail pointer, so that all reads will be completed before the
2707 * write is issued.
2708 */
2709 tail = ACCESS_ONCE(data->user_page->data_tail);
2710 smp_rmb();
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002711 offset = head = atomic_long_read(&data->head);
2712 head += size;
Peter Zijlstra2667de82009-09-17 19:01:10 +02002713 if (unlikely(!perf_output_space(data, tail, offset, head)))
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002714 goto fail;
2715 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2716
2717 handle->offset = offset;
2718 handle->head = head;
2719
Peter Zijlstra2667de82009-09-17 19:01:10 +02002720 if (head - tail > data->watermark)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002721 atomic_set(&data->wakeup, 1);
2722
2723 if (have_lost) {
2724 lost_event.header.type = PERF_EVENT_LOST;
2725 lost_event.header.misc = 0;
2726 lost_event.header.size = sizeof(lost_event);
2727 lost_event.id = counter->id;
2728 lost_event.lost = atomic_xchg(&data->lost, 0);
2729
2730 perf_output_put(handle, lost_event);
2731 }
2732
2733 return 0;
2734
2735fail:
2736 atomic_inc(&data->lost);
2737 perf_output_unlock(handle);
2738out:
2739 rcu_read_unlock();
2740
2741 return -ENOSPC;
2742}
2743
Markus Metzger5622f292009-09-15 13:00:23 +02002744void perf_output_end(struct perf_output_handle *handle)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002745{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002746 struct perf_counter *counter = handle->counter;
2747 struct perf_mmap_data *data = handle->data;
2748
Peter Zijlstra0d486962009-06-02 19:22:16 +02002749 int wakeup_events = counter->attr.wakeup_events;
Peter Zijlstrac4578102009-04-02 11:12:01 +02002750
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002751 if (handle->sample && wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002752 int events = atomic_inc_return(&data->events);
Peter Zijlstrac4578102009-04-02 11:12:01 +02002753 if (events >= wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002754 atomic_sub(wakeup_events, &data->events);
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002755 atomic_set(&data->wakeup, 1);
Peter Zijlstrac4578102009-04-02 11:12:01 +02002756 }
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002757 }
2758
2759 perf_output_unlock(handle);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002760 rcu_read_unlock();
2761}
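/*
 * The wakeup_events accounting above means that a counter opened with,
 * for example, attr.wakeup_events = 10 only wakes up poll()/SIGIO
 * waiters after every tenth sample record instead of after each one;
 * with wakeup_events left at 0, the watermark check in
 * perf_output_begin() decides when data->wakeup gets set.
 */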
2762
Peter Zijlstra709e50c2009-06-02 14:13:15 +02002763static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2764{
2765 /*
2766 * only top level counters have the pid namespace they were created in
2767 */
2768 if (counter->parent)
2769 counter = counter->parent;
2770
2771 return task_tgid_nr_ns(p, counter->ns);
2772}
2773
2774static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2775{
2776 /*
2777 * only top level counters have the pid namespace they were created in
2778 */
2779 if (counter->parent)
2780 counter = counter->parent;
2781
2782 return task_pid_nr_ns(p, counter->ns);
2783}
2784
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002785static void perf_output_read_one(struct perf_output_handle *handle,
2786 struct perf_counter *counter)
2787{
2788 u64 read_format = counter->attr.read_format;
2789 u64 values[4];
2790 int n = 0;
2791
2792 values[n++] = atomic64_read(&counter->count);
2793 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2794 values[n++] = counter->total_time_enabled +
2795 atomic64_read(&counter->child_total_time_enabled);
2796 }
2797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2798 values[n++] = counter->total_time_running +
2799 atomic64_read(&counter->child_total_time_running);
2800 }
2801 if (read_format & PERF_FORMAT_ID)
2802 values[n++] = primary_counter_id(counter);
2803
2804 perf_output_copy(handle, values, n * sizeof(u64));
2805}
2806
2807/*
2808 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
2809 */
2810static void perf_output_read_group(struct perf_output_handle *handle,
2811 struct perf_counter *counter)
2812{
2813 struct perf_counter *leader = counter->group_leader, *sub;
2814 u64 read_format = counter->attr.read_format;
2815 u64 values[5];
2816 int n = 0;
2817
2818 values[n++] = 1 + leader->nr_siblings;
2819
2820 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2821 values[n++] = leader->total_time_enabled;
2822
2823 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2824 values[n++] = leader->total_time_running;
2825
2826 if (leader != counter)
2827 leader->pmu->read(leader);
2828
2829 values[n++] = atomic64_read(&leader->count);
2830 if (read_format & PERF_FORMAT_ID)
2831 values[n++] = primary_counter_id(leader);
2832
2833 perf_output_copy(handle, values, n * sizeof(u64));
2834
2835 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2836 n = 0;
2837
2838 if (sub != counter)
2839 sub->pmu->read(sub);
2840
2841 values[n++] = atomic64_read(&sub->count);
2842 if (read_format & PERF_FORMAT_ID)
2843 values[n++] = primary_counter_id(sub);
2844
2845 perf_output_copy(handle, values, n * sizeof(u64));
2846 }
2847}
2848
2849static void perf_output_read(struct perf_output_handle *handle,
2850 struct perf_counter *counter)
2851{
2852 if (counter->attr.read_format & PERF_FORMAT_GROUP)
2853 perf_output_read_group(handle, counter);
2854 else
2855 perf_output_read_one(handle, counter);
2856}
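/*
 * The two helpers above lay the read values out as a flat array of u64s;
 * roughly, with optional fields present only when the corresponding
 * read_format bit is set:
 *
 *	without PERF_FORMAT_GROUP:
 *		value, [TOTAL_TIME_ENABLED], [TOTAL_TIME_RUNNING], [ID]
 *
 *	with PERF_FORMAT_GROUP:
 *		nr, [TOTAL_TIME_ENABLED], [TOTAL_TIME_RUNNING],
 *		then one { value, [ID] } pair for the leader and for
 *		each sibling
 */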
2857
Markus Metzger5622f292009-09-15 13:00:23 +02002858void perf_output_sample(struct perf_output_handle *handle,
2859 struct perf_event_header *header,
2860 struct perf_sample_data *data,
2861 struct perf_counter *counter)
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002862{
Markus Metzger5622f292009-09-15 13:00:23 +02002863 u64 sample_type = data->type;
2864
2865 perf_output_put(handle, *header);
2866
2867 if (sample_type & PERF_SAMPLE_IP)
2868 perf_output_put(handle, data->ip);
2869
2870 if (sample_type & PERF_SAMPLE_TID)
2871 perf_output_put(handle, data->tid_entry);
2872
2873 if (sample_type & PERF_SAMPLE_TIME)
2874 perf_output_put(handle, data->time);
2875
2876 if (sample_type & PERF_SAMPLE_ADDR)
2877 perf_output_put(handle, data->addr);
2878
2879 if (sample_type & PERF_SAMPLE_ID)
2880 perf_output_put(handle, data->id);
2881
2882 if (sample_type & PERF_SAMPLE_STREAM_ID)
2883 perf_output_put(handle, data->stream_id);
2884
2885 if (sample_type & PERF_SAMPLE_CPU)
2886 perf_output_put(handle, data->cpu_entry);
2887
2888 if (sample_type & PERF_SAMPLE_PERIOD)
2889 perf_output_put(handle, data->period);
2890
2891 if (sample_type & PERF_SAMPLE_READ)
2892 perf_output_read(handle, counter);
2893
2894 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2895 if (data->callchain) {
2896 int size = 1;
2897
2898 if (data->callchain)
2899 size += data->callchain->nr;
2900
2901 size *= sizeof(u64);
2902
2903 perf_output_copy(handle, data->callchain, size);
2904 } else {
2905 u64 nr = 0;
2906 perf_output_put(handle, nr);
2907 }
2908 }
2909
2910 if (sample_type & PERF_SAMPLE_RAW) {
2911 if (data->raw) {
2912 perf_output_put(handle, data->raw->size);
2913 perf_output_copy(handle, data->raw->data,
2914 data->raw->size);
2915 } else {
2916 struct {
2917 u32 size;
2918 u32 data;
2919 } raw = {
2920 .size = sizeof(u32),
2921 .data = 0,
2922 };
2923 perf_output_put(handle, raw);
2924 }
2925 }
2926}
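/*
 * Together with perf_prepare_sample() below, this produces a
 * PERF_EVENT_SAMPLE record whose body is, in this order, with each
 * field present only if the matching attr.sample_type bit is set:
 *
 *	IP, { pid, tid }, TIME, ADDR, ID, STREAM_ID, { cpu, res },
 *	PERIOD, READ values, { nr, ip[nr] } callchain,
 *	{ size, data[size] } raw
 */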
2927
2928void perf_prepare_sample(struct perf_event_header *header,
2929 struct perf_sample_data *data,
2930 struct perf_counter *counter,
2931 struct pt_regs *regs)
2932{
Peter Zijlstra0d486962009-06-02 19:22:16 +02002933 u64 sample_type = counter->attr.sample_type;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002934
Markus Metzger5622f292009-09-15 13:00:23 +02002935 data->type = sample_type;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002936
Markus Metzger5622f292009-09-15 13:00:23 +02002937 header->type = PERF_EVENT_SAMPLE;
2938 header->size = sizeof(*header);
2939
2940 header->misc = 0;
2941 header->misc |= perf_misc_flags(regs);
Peter Zijlstra6fab0192009-04-08 15:01:26 +02002942
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002943 if (sample_type & PERF_SAMPLE_IP) {
Markus Metzger5622f292009-09-15 13:00:23 +02002944 data->ip = perf_instruction_pointer(regs);
2945
2946 header->size += sizeof(data->ip);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002947 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002948
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002949 if (sample_type & PERF_SAMPLE_TID) {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002950 /* namespace issues */
Markus Metzger5622f292009-09-15 13:00:23 +02002951 data->tid_entry.pid = perf_counter_pid(counter, current);
2952 data->tid_entry.tid = perf_counter_tid(counter, current);
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002953
Markus Metzger5622f292009-09-15 13:00:23 +02002954 header->size += sizeof(data->tid_entry);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002955 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002956
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002957 if (sample_type & PERF_SAMPLE_TIME) {
Peter Zijlstradef0a9b2009-09-18 20:14:01 +02002958 data->time = perf_clock();
Peter Zijlstra4d855452009-04-08 15:01:32 +02002959
Markus Metzger5622f292009-09-15 13:00:23 +02002960 header->size += sizeof(data->time);
Peter Zijlstra4d855452009-04-08 15:01:32 +02002961 }
2962
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002963 if (sample_type & PERF_SAMPLE_ADDR)
Markus Metzger5622f292009-09-15 13:00:23 +02002964 header->size += sizeof(data->addr);
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002965
Markus Metzger5622f292009-09-15 13:00:23 +02002966 if (sample_type & PERF_SAMPLE_ID) {
2967 data->id = primary_counter_id(counter);
Peter Zijlstraa85f61a2009-05-08 18:52:23 +02002968
Markus Metzger5622f292009-09-15 13:00:23 +02002969 header->size += sizeof(data->id);
2970 }
2971
2972 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2973 data->stream_id = counter->id;
2974
2975 header->size += sizeof(data->stream_id);
2976 }
Peter Zijlstra7f453c22009-07-21 13:19:40 +02002977
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002978 if (sample_type & PERF_SAMPLE_CPU) {
Markus Metzger5622f292009-09-15 13:00:23 +02002979 data->cpu_entry.cpu = raw_smp_processor_id();
2980 data->cpu_entry.reserved = 0;
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002981
Markus Metzger5622f292009-09-15 13:00:23 +02002982 header->size += sizeof(data->cpu_entry);
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002983 }
2984
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002985 if (sample_type & PERF_SAMPLE_PERIOD)
Markus Metzger5622f292009-09-15 13:00:23 +02002986 header->size += sizeof(data->period);
Peter Zijlstra689802b2009-06-05 15:05:43 +02002987
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002988 if (sample_type & PERF_SAMPLE_READ)
Markus Metzger5622f292009-09-15 13:00:23 +02002989 header->size += perf_counter_read_size(counter);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002990
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002991 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
Markus Metzger5622f292009-09-15 13:00:23 +02002992 int size = 1;
Peter Zijlstra394ee072009-03-30 19:07:14 +02002993
Markus Metzger5622f292009-09-15 13:00:23 +02002994 data->callchain = perf_callchain(regs);
2995
2996 if (data->callchain)
2997 size += data->callchain->nr;
2998
2999 header->size += size * sizeof(u64);
Peter Zijlstra394ee072009-03-30 19:07:14 +02003000 }
3001
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003002 if (sample_type & PERF_SAMPLE_RAW) {
Peter Zijlstraa0445602009-08-10 11:16:52 +02003003 int size = sizeof(u32);
3004
3005 if (data->raw)
3006 size += data->raw->size;
3007 else
3008 size += sizeof(u32);
3009
3010 WARN_ON_ONCE(size & (sizeof(u64)-1));
Markus Metzger5622f292009-09-15 13:00:23 +02003011 header->size += size;
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003012 }
Markus Metzger5622f292009-09-15 13:00:23 +02003013}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003014
Markus Metzger5622f292009-09-15 13:00:23 +02003015static void perf_counter_output(struct perf_counter *counter, int nmi,
3016 struct perf_sample_data *data,
3017 struct pt_regs *regs)
3018{
3019 struct perf_output_handle handle;
3020 struct perf_event_header header;
3021
3022 perf_prepare_sample(&header, data, counter, regs);
3023
3024 if (perf_output_begin(&handle, counter, header.size, nmi, 1))
Peter Zijlstra5ed00412009-03-30 19:07:12 +02003025 return;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01003026
Markus Metzger5622f292009-09-15 13:00:23 +02003027 perf_output_sample(&handle, &header, data, counter);
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003028
Peter Zijlstra5ed00412009-03-30 19:07:12 +02003029 perf_output_end(&handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01003030}
3031
Peter Zijlstra0322cd62009-03-19 20:26:19 +01003032/*
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003033 * read event
3034 */
3035
3036struct perf_read_event {
3037 struct perf_event_header header;
3038
3039 u32 pid;
3040 u32 tid;
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003041};
3042
3043static void
3044perf_counter_read_event(struct perf_counter *counter,
3045 struct task_struct *task)
3046{
3047 struct perf_output_handle handle;
3048 struct perf_read_event event = {
3049 .header = {
3050 .type = PERF_EVENT_READ,
3051 .misc = 0,
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003052 .size = sizeof(event) + perf_counter_read_size(counter),
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003053 },
3054 .pid = perf_counter_pid(counter, task),
3055 .tid = perf_counter_tid(counter, task),
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003056 };
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003057 int ret;
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003058
3059 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
3060 if (ret)
3061 return;
3062
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003063 perf_output_put(&handle, event);
3064 perf_output_read(&handle, counter);
3065
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003066 perf_output_end(&handle);
3067}
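/*
 * The resulting PERF_EVENT_READ record is therefore just the header,
 * pid and tid above followed by the same read_format payload that
 * perf_output_read() emits into samples.
 */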
3068
3069/*
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003070 * task tracking -- fork/exit
3071 *
3072 * enabled by: attr.comm | attr.mmap | attr.task
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003073 */
3074
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003075struct perf_task_event {
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003076 struct task_struct *task;
3077 struct perf_counter_context *task_ctx;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003078
3079 struct {
3080 struct perf_event_header header;
3081
3082 u32 pid;
3083 u32 ppid;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003084 u32 tid;
3085 u32 ptid;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003086 } event;
3087};
3088
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003089static void perf_counter_task_output(struct perf_counter *counter,
3090 struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003091{
3092 struct perf_output_handle handle;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003093 int size = task_event->event.header.size;
3094 struct task_struct *task = task_event->task;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003095 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3096
3097 if (ret)
3098 return;
3099
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003100 task_event->event.pid = perf_counter_pid(counter, task);
Peter Zijlstra94d5d1b2009-08-13 16:14:42 +02003101 task_event->event.ppid = perf_counter_pid(counter, current);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003102
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003103 task_event->event.tid = perf_counter_tid(counter, task);
Peter Zijlstra94d5d1b2009-08-13 16:14:42 +02003104 task_event->event.ptid = perf_counter_tid(counter, current);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003105
3106 perf_output_put(&handle, task_event->event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003107 perf_output_end(&handle);
3108}
3109
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003110static int perf_counter_task_match(struct perf_counter *counter)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003111{
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003112 if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003113 return 1;
3114
3115 return 0;
3116}
3117
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003118static void perf_counter_task_ctx(struct perf_counter_context *ctx,
3119 struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003120{
3121 struct perf_counter *counter;
3122
3123 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3124 return;
3125
3126 rcu_read_lock();
3127 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003128 if (perf_counter_task_match(counter))
3129 perf_counter_task_output(counter, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003130 }
3131 rcu_read_unlock();
3132}
3133
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003134static void perf_counter_task_event(struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003135{
3136 struct perf_cpu_context *cpuctx;
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003137 struct perf_counter_context *ctx = task_event->task_ctx;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003138
3139 cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003140 perf_counter_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003141 put_cpu_var(perf_cpu_context);
3142
3143 rcu_read_lock();
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003144 if (!ctx)
3145 ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003146 if (ctx)
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003147 perf_counter_task_ctx(ctx, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003148 rcu_read_unlock();
3149}
3150
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003151static void perf_counter_task(struct task_struct *task,
3152 struct perf_counter_context *task_ctx,
3153 int new)
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003154{
3155 struct perf_task_event task_event;
3156
3157 if (!atomic_read(&nr_comm_counters) &&
3158 !atomic_read(&nr_mmap_counters) &&
3159 !atomic_read(&nr_task_counters))
3160 return;
3161
3162 task_event = (struct perf_task_event){
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003163 .task = task,
3164 .task_ctx = task_ctx,
3165 .event = {
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003166 .header = {
3167 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
3168 .misc = 0,
3169 .size = sizeof(task_event.event),
3170 },
3171 /* .pid */
3172 /* .ppid */
3173 /* .tid */
3174 /* .ptid */
3175 },
3176 };
3177
3178 perf_counter_task_event(&task_event);
3179}
3180
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003181void perf_counter_fork(struct task_struct *task)
3182{
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003183 perf_counter_task(task, NULL, 1);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003184}
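/*
 * So a counter opened with attr.task = 1 (or attr.comm / attr.mmap, per
 * perf_counter_task_match() above) receives a PERF_EVENT_FORK record for
 * each new task and a PERF_EVENT_EXIT record when a task exits, each
 * carrying the pid/ppid/tid/ptid quadruple resolved in the counter's
 * pid namespace.
 */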
3185
3186/*
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003187 * comm tracking
3188 */
3189
3190struct perf_comm_event {
Ingo Molnar22a4f652009-06-01 10:13:37 +02003191 struct task_struct *task;
3192 char *comm;
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003193 int comm_size;
3194
3195 struct {
3196 struct perf_event_header header;
3197
3198 u32 pid;
3199 u32 tid;
3200 } event;
3201};
3202
3203static void perf_counter_comm_output(struct perf_counter *counter,
3204 struct perf_comm_event *comm_event)
3205{
3206 struct perf_output_handle handle;
3207 int size = comm_event->event.header.size;
3208 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3209
3210 if (ret)
3211 return;
3212
Peter Zijlstra709e50c2009-06-02 14:13:15 +02003213 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
3214 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
3215
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003216 perf_output_put(&handle, comm_event->event);
3217 perf_output_copy(&handle, comm_event->comm,
3218 comm_event->comm_size);
3219 perf_output_end(&handle);
3220}
3221
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003222static int perf_counter_comm_match(struct perf_counter *counter)
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003223{
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003224 if (counter->attr.comm)
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003225 return 1;
3226
3227 return 0;
3228}
3229
3230static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
3231 struct perf_comm_event *comm_event)
3232{
3233 struct perf_counter *counter;
3234
3235 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3236 return;
3237
3238 rcu_read_lock();
3239 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003240 if (perf_counter_comm_match(counter))
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003241 perf_counter_comm_output(counter, comm_event);
3242 }
3243 rcu_read_unlock();
3244}
3245
3246static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3247{
3248 struct perf_cpu_context *cpuctx;
Peter Zijlstra665c2142009-05-29 14:51:57 +02003249 struct perf_counter_context *ctx;
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003250 unsigned int size;
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003251 char comm[TASK_COMM_LEN];
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003252
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003253 memset(comm, 0, sizeof(comm));
3254 strncpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnar888fcee2009-04-09 09:48:22 +02003255 size = ALIGN(strlen(comm)+1, sizeof(u64));
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003256
3257 comm_event->comm = comm;
3258 comm_event->comm_size = size;
3259
3260 comm_event->event.header.size = sizeof(comm_event->event) + size;
3261
3262 cpuctx = &get_cpu_var(perf_cpu_context);
3263 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
3264 put_cpu_var(perf_cpu_context);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003265
3266 rcu_read_lock();
3267 /*
 3268 * It doesn't really matter which of the child contexts the
 3269 * event ends up in.
3270 */
3271 ctx = rcu_dereference(current->perf_counter_ctxp);
3272 if (ctx)
3273 perf_counter_comm_ctx(ctx, comm_event);
3274 rcu_read_unlock();
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003275}
3276
3277void perf_counter_comm(struct task_struct *task)
3278{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003279 struct perf_comm_event comm_event;
3280
Paul Mackerras57e79862009-06-30 16:07:19 +10003281 if (task->perf_counter_ctxp)
3282 perf_counter_enable_on_exec(task);
3283
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003284 if (!atomic_read(&nr_comm_counters))
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003285 return;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10003286
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003287 comm_event = (struct perf_comm_event){
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003288 .task = task,
Peter Zijlstra573402d2009-07-22 11:13:50 +02003289 /* .comm */
3290 /* .comm_size */
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003291 .event = {
Peter Zijlstra573402d2009-07-22 11:13:50 +02003292 .header = {
3293 .type = PERF_EVENT_COMM,
3294 .misc = 0,
3295 /* .size */
3296 },
3297 /* .pid */
3298 /* .tid */
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003299 },
3300 };
3301
3302 perf_counter_comm_event(&comm_event);
3303}
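/*
 * Example: a profiler that wants to map sampled pids back to command
 * names sets attr.comm = 1 when opening the counter; each comm change
 * (typically exec()) then shows up as a PERF_EVENT_COMM record carrying
 * pid, tid and the zero-padded, u64-aligned comm string built above.
 */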
3304
3305/*
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003306 * mmap tracking
3307 */
3308
3309struct perf_mmap_event {
Peter Zijlstra089dd792009-06-05 14:04:55 +02003310 struct vm_area_struct *vma;
3311
3312 const char *file_name;
3313 int file_size;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003314
3315 struct {
3316 struct perf_event_header header;
3317
3318 u32 pid;
3319 u32 tid;
3320 u64 start;
3321 u64 len;
3322 u64 pgoff;
3323 } event;
3324};
3325
3326static void perf_counter_mmap_output(struct perf_counter *counter,
3327 struct perf_mmap_event *mmap_event)
3328{
3329 struct perf_output_handle handle;
3330 int size = mmap_event->event.header.size;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003331 int ret = perf_output_begin(&handle, counter, size, 0, 0);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003332
3333 if (ret)
3334 return;
3335
Peter Zijlstra709e50c2009-06-02 14:13:15 +02003336 mmap_event->event.pid = perf_counter_pid(counter, current);
3337 mmap_event->event.tid = perf_counter_tid(counter, current);
3338
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003339 perf_output_put(&handle, mmap_event->event);
3340 perf_output_copy(&handle, mmap_event->file_name,
3341 mmap_event->file_size);
Peter Zijlstra78d613e2009-03-30 19:07:11 +02003342 perf_output_end(&handle);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003343}
3344
3345static int perf_counter_mmap_match(struct perf_counter *counter,
3346 struct perf_mmap_event *mmap_event)
3347{
Peter Zijlstrad99e9442009-06-04 17:08:58 +02003348 if (counter->attr.mmap)
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003349 return 1;
3350
3351 return 0;
3352}
3353
3354static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
3355 struct perf_mmap_event *mmap_event)
3356{
3357 struct perf_counter *counter;
3358
3359 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3360 return;
3361
3362 rcu_read_lock();
3363 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3364 if (perf_counter_mmap_match(counter, mmap_event))
3365 perf_counter_mmap_output(counter, mmap_event);
3366 }
3367 rcu_read_unlock();
3368}
3369
3370static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3371{
3372 struct perf_cpu_context *cpuctx;
Peter Zijlstra665c2142009-05-29 14:51:57 +02003373 struct perf_counter_context *ctx;
Peter Zijlstra089dd792009-06-05 14:04:55 +02003374 struct vm_area_struct *vma = mmap_event->vma;
3375 struct file *file = vma->vm_file;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003376 unsigned int size;
3377 char tmp[16];
3378 char *buf = NULL;
Peter Zijlstra089dd792009-06-05 14:04:55 +02003379 const char *name;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003380
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003381 memset(tmp, 0, sizeof(tmp));
3382
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003383 if (file) {
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003384 /*
3385 * d_path works from the end of the buffer backwards, so we
3386 * need to add enough zero bytes after the string to handle
3387 * the 64bit alignment we do later.
3388 */
3389 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003390 if (!buf) {
3391 name = strncpy(tmp, "//enomem", sizeof(tmp));
3392 goto got_name;
3393 }
Peter Zijlstrad3d21c42009-04-09 10:53:46 +02003394 name = d_path(&file->f_path, buf, PATH_MAX);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003395 if (IS_ERR(name)) {
3396 name = strncpy(tmp, "//toolong", sizeof(tmp));
3397 goto got_name;
3398 }
3399 } else {
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003400 if (arch_vma_name(mmap_event->vma)) {
3401 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3402 sizeof(tmp));
Peter Zijlstra089dd792009-06-05 14:04:55 +02003403 goto got_name;
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003404 }
Peter Zijlstra089dd792009-06-05 14:04:55 +02003405
3406 if (!vma->vm_mm) {
3407 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3408 goto got_name;
3409 }
3410
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003411 name = strncpy(tmp, "//anon", sizeof(tmp));
3412 goto got_name;
3413 }
3414
3415got_name:
Ingo Molnar888fcee2009-04-09 09:48:22 +02003416 size = ALIGN(strlen(name)+1, sizeof(u64));
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003417
3418 mmap_event->file_name = name;
3419 mmap_event->file_size = size;
3420
3421 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
3422
3423 cpuctx = &get_cpu_var(perf_cpu_context);
3424 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
3425 put_cpu_var(perf_cpu_context);
3426
Peter Zijlstra665c2142009-05-29 14:51:57 +02003427 rcu_read_lock();
3428 /*
 3429 * It doesn't really matter which of the child contexts the
 3430 * event ends up in.
3431 */
3432 ctx = rcu_dereference(current->perf_counter_ctxp);
3433 if (ctx)
3434 perf_counter_mmap_ctx(ctx, mmap_event);
3435 rcu_read_unlock();
3436
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003437 kfree(buf);
3438}
3439
Peter Zijlstra089dd792009-06-05 14:04:55 +02003440void __perf_counter_mmap(struct vm_area_struct *vma)
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003441{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003442 struct perf_mmap_event mmap_event;
3443
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003444 if (!atomic_read(&nr_mmap_counters))
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003445 return;
3446
3447 mmap_event = (struct perf_mmap_event){
Peter Zijlstra089dd792009-06-05 14:04:55 +02003448 .vma = vma,
Peter Zijlstra573402d2009-07-22 11:13:50 +02003449 /* .file_name */
3450 /* .file_size */
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003451 .event = {
Peter Zijlstra573402d2009-07-22 11:13:50 +02003452 .header = {
3453 .type = PERF_EVENT_MMAP,
3454 .misc = 0,
3455 /* .size */
3456 },
3457 /* .pid */
3458 /* .tid */
Peter Zijlstra089dd792009-06-05 14:04:55 +02003459 .start = vma->vm_start,
3460 .len = vma->vm_end - vma->vm_start,
3461 .pgoff = vma->vm_pgoff,
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003462 },
3463 };
3464
3465 perf_counter_mmap_event(&mmap_event);
3466}
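/*
 * Likewise, with attr.mmap = 1 each newly created executable mapping
 * produces a PERF_EVENT_MMAP record with the vma's start/len/pgoff and
 * the u64-aligned file name (or "[vdso]", "//anon", ... as resolved
 * above), which is what lets user space turn sampled IPs back into
 * symbols.
 */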
3467
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003468/*
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003469 * IRQ throttle logging
3470 */
3471
3472static void perf_log_throttle(struct perf_counter *counter, int enable)
3473{
3474 struct perf_output_handle handle;
3475 int ret;
3476
3477 struct {
3478 struct perf_event_header header;
3479 u64 time;
Peter Zijlstracca3f452009-06-11 14:57:55 +02003480 u64 id;
Peter Zijlstra7f453c22009-07-21 13:19:40 +02003481 u64 stream_id;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003482 } throttle_event = {
3483 .header = {
Anton Blanchard966ee4d2009-07-22 23:05:46 +10003484 .type = PERF_EVENT_THROTTLE,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003485 .misc = 0,
3486 .size = sizeof(throttle_event),
3487 },
Peter Zijlstradef0a9b2009-09-18 20:14:01 +02003488 .time = perf_clock(),
Peter Zijlstra7f453c22009-07-21 13:19:40 +02003489 .id = primary_counter_id(counter),
3490 .stream_id = counter->id,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003491 };
3492
Anton Blanchard966ee4d2009-07-22 23:05:46 +10003493 if (enable)
3494 throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
3495
Ingo Molnar0127c3e2009-05-25 22:03:26 +02003496 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003497 if (ret)
3498 return;
3499
3500 perf_output_put(&handle, throttle_event);
3501 perf_output_end(&handle);
3502}
3503
3504/*
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01003505 * Generic counter overflow handling, sampling.
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003506 */
3507
Peter Zijlstra850bc732009-09-17 18:47:11 +02003508static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003509 int throttle, struct perf_sample_data *data,
3510 struct pt_regs *regs)
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003511{
Peter Zijlstra79f14642009-04-06 11:45:07 +02003512 int events = atomic_read(&counter->event_limit);
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003513 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003514 int ret = 0;
3515
Peter Zijlstra850bc732009-09-17 18:47:11 +02003516 throttle = (throttle && counter->pmu->unthrottle != NULL);
3517
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003518 if (!throttle) {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003519 hwc->interrupts++;
Ingo Molnar128f0482009-06-03 22:19:36 +02003520 } else {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003521 if (hwc->interrupts != MAX_INTERRUPTS) {
3522 hwc->interrupts++;
Peter Zijlstradf58ab22009-06-11 11:25:05 +02003523 if (HZ * hwc->interrupts >
3524 (u64)sysctl_perf_counter_sample_rate) {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003525 hwc->interrupts = MAX_INTERRUPTS;
Ingo Molnar128f0482009-06-03 22:19:36 +02003526 perf_log_throttle(counter, 0);
3527 ret = 1;
3528 }
3529 } else {
3530 /*
 3531 * Keep re-disabling the counter even though we disabled it on
 3532 * the previous pass - just in case we raced with a
 3533 * sched-in and the counter got enabled again:
3534 */
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003535 ret = 1;
3536 }
3537 }
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003538
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003539 if (counter->attr.freq) {
Peter Zijlstradef0a9b2009-09-18 20:14:01 +02003540 u64 now = perf_clock();
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003541 s64 delta = now - hwc->freq_stamp;
3542
3543 hwc->freq_stamp = now;
3544
3545 if (delta > 0 && delta < TICK_NSEC)
3546 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3547 }
3548
Peter Zijlstra2023b352009-05-05 17:50:26 +02003549 /*
3550 * XXX event_limit might not quite work as expected on inherited
3551 * counters
3552 */
3553
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003554 counter->pending_kill = POLL_IN;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003555 if (events && atomic_dec_and_test(&counter->event_limit)) {
3556 ret = 1;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003557 counter->pending_kill = POLL_HUP;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003558 if (nmi) {
3559 counter->pending_disable = 1;
3560 perf_pending_queue(&counter->pending,
3561 perf_pending_counter);
3562 } else
3563 perf_counter_disable(counter);
3564 }
3565
Markus Metzger5622f292009-09-15 13:00:23 +02003566 perf_counter_output(counter, nmi, data, regs);
Peter Zijlstra79f14642009-04-06 11:45:07 +02003567 return ret;
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003568}
3569
Peter Zijlstra850bc732009-09-17 18:47:11 +02003570int perf_counter_overflow(struct perf_counter *counter, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003571 struct perf_sample_data *data,
3572 struct pt_regs *regs)
Peter Zijlstra850bc732009-09-17 18:47:11 +02003573{
Markus Metzger5622f292009-09-15 13:00:23 +02003574 return __perf_counter_overflow(counter, nmi, 1, data, regs);
Peter Zijlstra850bc732009-09-17 18:47:11 +02003575}
3576
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003577/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003578 * Generic software counter infrastructure
3579 */
3580
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003581/*
3582 * We directly increment counter->count and keep a second value in
3583 * counter->hw.period_left to count intervals. This period counter
3584 * is kept in the range [-sample_period, 0] so that we can use the
 3585 * sign as the trigger.
3586 */
3587
3588static u64 perf_swcounter_set_period(struct perf_counter *counter)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003589{
3590 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003591 u64 period = hwc->last_period;
3592 u64 nr, offset;
3593 s64 old, val;
3594
3595 hwc->last_period = hwc->sample_period;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003596
3597again:
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003598 old = val = atomic64_read(&hwc->period_left);
3599 if (val < 0)
3600 return 0;
3601
3602 nr = div64_u64(period + val, period);
3603 offset = nr * period;
3604 val -= offset;
3605 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003606 goto again;
3607
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003608 return nr;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003609}
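/*
 * Worked example of the arithmetic above: with period = 100 and
 * period_left read as +250, nr = (100 + 250) / 100 = 3 overflows are
 * reported and period_left is rewound to 250 - 3 * 100 = -50, i.e.
 * back inside [-sample_period, 0).
 */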
3610
3611static void perf_swcounter_overflow(struct perf_counter *counter,
Markus Metzger5622f292009-09-15 13:00:23 +02003612 int nmi, struct perf_sample_data *data,
3613 struct pt_regs *regs)
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003614{
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003615 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra850bc732009-09-17 18:47:11 +02003616 int throttle = 0;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003617 u64 overflow;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02003618
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003619 data->period = counter->hw.last_period;
3620 overflow = perf_swcounter_set_period(counter);
3621
3622 if (hwc->interrupts == MAX_INTERRUPTS)
3623 return;
3624
3625 for (; overflow; overflow--) {
Markus Metzger5622f292009-09-15 13:00:23 +02003626 if (__perf_counter_overflow(counter, nmi, throttle,
3627 data, regs)) {
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003628 /*
3629 * We inhibit the overflow from happening when
3630 * hwc->interrupts == MAX_INTERRUPTS.
3631 */
3632 break;
3633 }
Peter Zijlstracf450a72009-09-18 12:18:14 +02003634 throttle = 1;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003635 }
3636}
3637
3638static void perf_swcounter_unthrottle(struct perf_counter *counter)
3639{
3640 /*
 3641 * Nothing to do; we already reset hwc->interrupts.
3642 */
3643}
3644
3645static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
Markus Metzger5622f292009-09-15 13:00:23 +02003646 int nmi, struct perf_sample_data *data,
3647 struct pt_regs *regs)
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003648{
3649 struct hw_perf_counter *hwc = &counter->hw;
3650
3651 atomic64_add(nr, &counter->count);
3652
3653 if (!hwc->sample_period)
3654 return;
3655
Markus Metzger5622f292009-09-15 13:00:23 +02003656 if (!regs)
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003657 return;
3658
3659 if (!atomic64_add_negative(nr, &hwc->period_left))
Markus Metzger5622f292009-09-15 13:00:23 +02003660 perf_swcounter_overflow(counter, nmi, data, regs);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003661}
3662
Paul Mackerras880ca152009-06-01 17:49:14 +10003663static int perf_swcounter_is_counting(struct perf_counter *counter)
3664{
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003665 /*
 3666 * The counter is active; we're good!
3667 */
Paul Mackerras880ca152009-06-01 17:49:14 +10003668 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3669 return 1;
3670
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003671 /*
3672 * The counter is off/error, not counting.
3673 */
Paul Mackerras880ca152009-06-01 17:49:14 +10003674 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3675 return 0;
3676
3677 /*
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003678 * The counter is inactive; if the context is active then we are
 3679 * part of a group that didn't make it onto the 'pmu', so we are
 3680 * not counting.
Paul Mackerras880ca152009-06-01 17:49:14 +10003681 */
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003682 if (counter->ctx->is_active)
3683 return 0;
3684
3685 /*
 3686 * We're inactive and the context is too; this means the
 3687 * task is scheduled out and we're counting events that happen
 3688 * to us, like migration events.
3689 */
3690 return 1;
Paul Mackerras880ca152009-06-01 17:49:14 +10003691}
3692
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003693static int perf_swcounter_match(struct perf_counter *counter,
Peter Zijlstra1c432d82009-06-11 13:19:29 +02003694 enum perf_type_id type,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003695 u32 event, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003696{
Paul Mackerras880ca152009-06-01 17:49:14 +10003697 if (!perf_swcounter_is_counting(counter))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003698 return 0;
3699
Ingo Molnara21ca2c2009-06-06 09:58:57 +02003700 if (counter->attr.type != type)
3701 return 0;
3702 if (counter->attr.config != event)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003703 return 0;
3704
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003705 if (regs) {
Peter Zijlstra0d486962009-06-02 19:22:16 +02003706 if (counter->attr.exclude_user && user_mode(regs))
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003707 return 0;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003708
Peter Zijlstra0d486962009-06-02 19:22:16 +02003709 if (counter->attr.exclude_kernel && !user_mode(regs))
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003710 return 0;
3711 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003712
3713 return 1;
3714}
3715
3716static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003717 enum perf_type_id type,
3718 u32 event, u64 nr, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003719 struct perf_sample_data *data,
3720 struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003721{
3722 struct perf_counter *counter;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003723
Peter Zijlstra01ef09d2009-03-19 20:26:11 +01003724 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003725 return;
3726
Peter Zijlstra592903c2009-03-13 12:21:36 +01003727 rcu_read_lock();
3728 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Markus Metzger5622f292009-09-15 13:00:23 +02003729 if (perf_swcounter_match(counter, type, event, regs))
3730 perf_swcounter_add(counter, nr, nmi, data, regs);
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003731 }
Peter Zijlstra592903c2009-03-13 12:21:36 +01003732 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003733}
3734
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003735static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3736{
3737 if (in_nmi())
3738 return &cpuctx->recursion[3];
3739
3740 if (in_irq())
3741 return &cpuctx->recursion[2];
3742
3743 if (in_softirq())
3744 return &cpuctx->recursion[1];
3745
3746 return &cpuctx->recursion[0];
3747}
3748
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003749static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3750 u64 nr, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003751 struct perf_sample_data *data,
3752 struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003753{
3754 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003755 int *recursion = perf_swcounter_recursion_context(cpuctx);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003756 struct perf_counter_context *ctx;
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003757
3758 if (*recursion)
3759 goto out;
3760
3761 (*recursion)++;
3762 barrier();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003763
Peter Zijlstra78f13e92009-04-08 15:01:33 +02003764 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
Markus Metzger5622f292009-09-15 13:00:23 +02003765 nr, nmi, data, regs);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003766 rcu_read_lock();
3767 /*
 3768 * It doesn't really matter which of the child contexts the
 3769 * event ends up in.
3770 */
3771 ctx = rcu_dereference(current->perf_counter_ctxp);
3772 if (ctx)
Markus Metzger5622f292009-09-15 13:00:23 +02003773 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003774 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003775
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003776 barrier();
3777 (*recursion)--;
3778
3779out:
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003780 put_cpu_var(perf_cpu_context);
3781}
3782
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003783void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3784 struct pt_regs *regs, u64 addr)
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003785{
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003786 struct perf_sample_data data = {
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003787 .addr = addr,
3788 };
3789
Markus Metzger5622f292009-09-15 13:00:23 +02003790 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi,
3791 &data, regs);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003792}
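/*
 * __perf_swcounter_event() is the hook the rest of the kernel uses to
 * feed software events in; a caller such as the fault path does
 * something along the lines of (sketch, see the perf_swcounter_event()
 * wrapper in perf_counter.h):
 *
 *	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
 *
 * where the wrapper only drops into this function when
 * perf_swcounter_enabled[] shows that somebody opened such a counter.
 */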
3793
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003794static void perf_swcounter_read(struct perf_counter *counter)
3795{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003796}
3797
3798static int perf_swcounter_enable(struct perf_counter *counter)
3799{
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003800 struct hw_perf_counter *hwc = &counter->hw;
3801
3802 if (hwc->sample_period) {
3803 hwc->last_period = hwc->sample_period;
3804 perf_swcounter_set_period(counter);
3805 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003806 return 0;
3807}
3808
3809static void perf_swcounter_disable(struct perf_counter *counter)
3810{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003811}
3812
Robert Richter4aeb0b42009-04-29 12:47:03 +02003813static const struct pmu perf_ops_generic = {
Peter Zijlstraac17dc82009-03-13 12:21:34 +01003814 .enable = perf_swcounter_enable,
3815 .disable = perf_swcounter_disable,
3816 .read = perf_swcounter_read,
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003817 .unthrottle = perf_swcounter_unthrottle,
Peter Zijlstraac17dc82009-03-13 12:21:34 +01003818};
3819
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003820/*
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003821 * hrtimer based swcounter callback
3822 */
3823
3824static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3825{
3826 enum hrtimer_restart ret = HRTIMER_RESTART;
3827 struct perf_sample_data data;
Markus Metzger5622f292009-09-15 13:00:23 +02003828 struct pt_regs *regs;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003829 struct perf_counter *counter;
3830 u64 period;
3831
3832 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3833 counter->pmu->read(counter);
3834
3835 data.addr = 0;
Markus Metzger5622f292009-09-15 13:00:23 +02003836 regs = get_irq_regs();
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003837 /*
3838 * In case we exclude kernel IPs or are somehow not in interrupt
 3839 * context, provide the next best thing: the user IP.
3840 */
Markus Metzger5622f292009-09-15 13:00:23 +02003841 if ((counter->attr.exclude_kernel || !regs) &&
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003842 !counter->attr.exclude_user)
Markus Metzger5622f292009-09-15 13:00:23 +02003843 regs = task_pt_regs(current);
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003844
Markus Metzger5622f292009-09-15 13:00:23 +02003845 if (regs) {
3846 if (perf_counter_overflow(counter, 0, &data, regs))
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003847 ret = HRTIMER_NORESTART;
3848 }
3849
3850 period = max_t(u64, 10000, counter->hw.sample_period);
3851 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3852
3853 return ret;
3854}
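/*
 * Note the max_t() clamp above: hrtimer based software counters never
 * re-arm themselves with a period shorter than 10000 ns, so even a tiny
 * attr.sample_period (or a huge attr.sample_freq) tops out at roughly
 * 100 kHz instead of livelocking the CPU in the timer handler.
 */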
3855
3856/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003857 * Software counter: cpu wall time clock
3858 */
3859
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003860static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3861{
3862 int cpu = raw_smp_processor_id();
3863 s64 prev;
3864 u64 now;
3865
3866 now = cpu_clock(cpu);
3867 prev = atomic64_read(&counter->hw.prev_count);
3868 atomic64_set(&counter->hw.prev_count, now);
3869 atomic64_add(now - prev, &counter->count);
3870}
3871
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003872static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3873{
3874 struct hw_perf_counter *hwc = &counter->hw;
3875 int cpu = raw_smp_processor_id();
3876
3877 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01003878 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3879 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003880 if (hwc->sample_period) {
3881 u64 period = max_t(u64, 10000, hwc->sample_period);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003882 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003883 ns_to_ktime(period), 0,
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003884 HRTIMER_MODE_REL, 0);
3885 }
3886
3887 return 0;
3888}
3889
Ingo Molnar5c92d122008-12-11 13:21:10 +01003890static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3891{
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003892 if (counter->hw.sample_period)
Peter Zijlstrab986d7e2009-05-20 12:21:21 +02003893 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003894 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01003895}
3896
3897static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3898{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003899 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01003900}
3901
Robert Richter4aeb0b42009-04-29 12:47:03 +02003902static const struct pmu perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01003903 .enable = cpu_clock_perf_counter_enable,
3904 .disable = cpu_clock_perf_counter_disable,
3905 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01003906};
3907
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01003908/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003909 * Software counter: task time clock
3910 */
3911
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003912static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
Ingo Molnarbae43c92008-12-11 14:03:20 +01003913{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003914 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003915 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01003916
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003917 prev = atomic64_xchg(&counter->hw.prev_count, now);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003918 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003919 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01003920}
3921
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01003922static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003923{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003924 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003925 u64 now;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003926
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003927 now = counter->ctx->time;
3928
3929 atomic64_set(&hwc->prev_count, now);
Peter Zijlstra039fc912009-03-13 16:43:47 +01003930 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3931 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003932 if (hwc->sample_period) {
3933 u64 period = max_t(u64, 10000, hwc->sample_period);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003934 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003935 ns_to_ktime(period), 0,
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003936 HRTIMER_MODE_REL, 0);
3937 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01003938
3939 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003940}
3941
3942static void task_clock_perf_counter_disable(struct perf_counter *counter)
3943{
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003944 if (counter->hw.sample_period)
Peter Zijlstrab986d7e2009-05-20 12:21:21 +02003945 hrtimer_cancel(&counter->hw.hrtimer);
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003946 task_clock_perf_counter_update(counter, counter->ctx->time);
3947
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003948}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01003949
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003950static void task_clock_perf_counter_read(struct perf_counter *counter)
3951{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003952 u64 time;
3953
3954 if (!in_nmi()) {
3955 update_context_time(counter->ctx);
3956 time = counter->ctx->time;
3957 } else {
3958 u64 now = perf_clock();
3959 u64 delta = now - counter->ctx->timestamp;
3960 time = counter->ctx->time + delta;
3961 }
3962
3963 task_clock_perf_counter_update(counter, time);
Ingo Molnarbae43c92008-12-11 14:03:20 +01003964}
3965
Robert Richter4aeb0b42009-04-29 12:47:03 +02003966static const struct pmu perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01003967 .enable = task_clock_perf_counter_enable,
3968 .disable = task_clock_perf_counter_disable,
3969 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01003970};
3971
Peter Zijlstrae077df42009-03-19 20:26:17 +01003972#ifdef CONFIG_EVENT_PROFILE
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003973void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3974 int entry_size)
Peter Zijlstrae077df42009-03-19 20:26:17 +01003975{
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003976 struct perf_raw_record raw = {
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003977 .size = entry_size,
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003978 .data = record,
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003979 };
3980
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003981 struct perf_sample_data data = {
Peter Zijlstra3a659302009-07-21 17:34:57 +02003982 .addr = addr,
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003983 .raw = &raw,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003984 };
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003985
Markus Metzger5622f292009-09-15 13:00:23 +02003986 struct pt_regs *regs = get_irq_regs();
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003987
Markus Metzger5622f292009-09-15 13:00:23 +02003988 if (!regs)
3989 regs = task_pt_regs(current);
3990
3991 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3992 &data, regs);
Peter Zijlstrae077df42009-03-19 20:26:17 +01003993}
Steven Whitehouseff7b1b42009-04-15 16:55:05 +01003994EXPORT_SYMBOL_GPL(perf_tpcounter_event);
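/*
 * perf_tpcounter_event() is the entry point used by the ftrace event
 * profiling glue: it receives the already-serialized tracepoint record,
 * wraps it in a perf_raw_record and pushes it through the same
 * do_perf_swcounter_event() path as __perf_swcounter_event(), using
 * PERF_TYPE_TRACEPOINT and the tracepoint's event id as the config.
 */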
Peter Zijlstrae077df42009-03-19 20:26:17 +01003995
3996extern int ftrace_profile_enable(int);
3997extern void ftrace_profile_disable(int);
3998
3999static void tp_perf_counter_destroy(struct perf_counter *counter)
4000{
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01004001 ftrace_profile_disable(counter->attr.config);
Peter Zijlstrae077df42009-03-19 20:26:17 +01004002}
4003
Robert Richter4aeb0b42009-04-29 12:47:03 +02004004static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01004005{
Peter Zijlstraa4e95fc2009-08-10 11:20:12 +02004006 /*
 4007 * Raw tracepoint data is a severe data leak; only allow root to
 4008 * have these.
4009 */
4010 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
Ingo Molnar0fbdea12009-09-02 21:46:00 +02004011 perf_paranoid_tracepoint_raw() &&
Peter Zijlstraa4e95fc2009-08-10 11:20:12 +02004012 !capable(CAP_SYS_ADMIN))
4013 return ERR_PTR(-EPERM);
4014
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01004015 if (ftrace_profile_enable(counter->attr.config))
Peter Zijlstrae077df42009-03-19 20:26:17 +01004016 return NULL;
4017
4018 counter->destroy = tp_perf_counter_destroy;
4019
4020 return &perf_ops_generic;
4021}
4022#else
Robert Richter4aeb0b42009-04-29 12:47:03 +02004023static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01004024{
4025 return NULL;
4026}
4027#endif
4028
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004029atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
4030
4031static void sw_perf_counter_destroy(struct perf_counter *counter)
4032{
4033 u64 event = counter->attr.config;
4034
Peter Zijlstraf3440112009-06-22 13:58:35 +02004035 WARN_ON(counter->parent);
4036
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004037 atomic_dec(&perf_swcounter_enabled[event]);
4038}
4039
Robert Richter4aeb0b42009-04-29 12:47:03 +02004040static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
Ingo Molnar5c92d122008-12-11 13:21:10 +01004041{
Robert Richter4aeb0b42009-04-29 12:47:03 +02004042 const struct pmu *pmu = NULL;
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004043 u64 event = counter->attr.config;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004044
Paul Mackerras0475f9e2009-02-11 14:35:35 +11004045 /*
4046 * Software counters (currently) can't in general distinguish
4047 * between user, kernel and hypervisor events.
4048 * However, context switches and cpu migrations are considered
4049 * to be kernel events, and page faults are never hypervisor
4050 * events.
4051 */
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004052 switch (event) {
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02004053 case PERF_COUNT_SW_CPU_CLOCK:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004054 pmu = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01004055
Ingo Molnar5c92d122008-12-11 13:21:10 +01004056 break;
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02004057 case PERF_COUNT_SW_TASK_CLOCK:
Paul Mackerras23a185c2009-02-09 22:42:47 +11004058 /*
4059 * If the user instantiates this as a per-cpu counter,
4060 * use the cpu_clock counter instead.
4061 */
4062 if (counter->ctx->task)
Robert Richter4aeb0b42009-04-29 12:47:03 +02004063 pmu = &perf_ops_task_clock;
Paul Mackerras23a185c2009-02-09 22:42:47 +11004064 else
Robert Richter4aeb0b42009-04-29 12:47:03 +02004065 pmu = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01004066
Ingo Molnarbae43c92008-12-11 14:03:20 +01004067 break;
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02004068 case PERF_COUNT_SW_PAGE_FAULTS:
4069 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4070 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4071 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4072 case PERF_COUNT_SW_CPU_MIGRATIONS:
Peter Zijlstraf3440112009-06-22 13:58:35 +02004073 if (!counter->parent) {
4074 atomic_inc(&perf_swcounter_enabled[event]);
4075 counter->destroy = sw_perf_counter_destroy;
4076 }
Paul Mackerras3f731ca2009-06-01 17:52:30 +10004077 pmu = &perf_ops_generic;
Ingo Molnar6c594c22008-12-14 12:34:15 +01004078 break;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004079 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01004080
Robert Richter4aeb0b42009-04-29 12:47:03 +02004081 return pmu;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004082}
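/*
 * A minimal user-space sketch of requesting one of these software
 * counters (assuming a raw syscall(__NR_perf_counter_open, ...) wrapper;
 * error handling omitted):
 *
 *	struct perf_counter_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CONTEXT_SWITCHES,
 *	};
 *	u64 count;
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 *	sleep(1);
 *	read(fd, &count, sizeof(count));
 *
 * sw_perf_counter_init() above then selects perf_ops_generic and bumps
 * perf_swcounter_enabled[PERF_COUNT_SW_CONTEXT_SWITCHES].
 */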
4083
Thomas Gleixner0793a612008-12-04 20:12:29 +01004084/*
4085 * Allocate and initialize a counter structure
4086 */
4087static struct perf_counter *
Peter Zijlstra0d486962009-06-02 19:22:16 +02004088perf_counter_alloc(struct perf_counter_attr *attr,
Ingo Molnar04289bb2008-12-11 08:38:42 +01004089 int cpu,
Paul Mackerras23a185c2009-02-09 22:42:47 +11004090 struct perf_counter_context *ctx,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004091 struct perf_counter *group_leader,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004092 struct perf_counter *parent_counter,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004093 gfp_t gfpflags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004094{
Robert Richter4aeb0b42009-04-29 12:47:03 +02004095 const struct pmu *pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01004096 struct perf_counter *counter;
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004097 struct hw_perf_counter *hwc;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004098 long err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004099
Ingo Molnar9b51f662008-12-12 13:49:45 +01004100 counter = kzalloc(sizeof(*counter), gfpflags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004101 if (!counter)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004102 return ERR_PTR(-ENOMEM);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004103
Ingo Molnar04289bb2008-12-11 08:38:42 +01004104 /*
4105 * Single counters are their own group leaders, with an
4106 * empty sibling list:
4107 */
4108 if (!group_leader)
4109 group_leader = counter;
4110
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004111 mutex_init(&counter->child_mutex);
4112 INIT_LIST_HEAD(&counter->child_list);
4113
Ingo Molnar04289bb2008-12-11 08:38:42 +01004114 INIT_LIST_HEAD(&counter->list_entry);
Peter Zijlstra592903c2009-03-13 12:21:36 +01004115 INIT_LIST_HEAD(&counter->event_entry);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004116 INIT_LIST_HEAD(&counter->sibling_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004117 init_waitqueue_head(&counter->waitq);
4118
Peter Zijlstra7b732a72009-03-23 18:22:10 +01004119 mutex_init(&counter->mmap_mutex);
4120
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004121 counter->cpu = cpu;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004122 counter->attr = *attr;
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004123 counter->group_leader = group_leader;
4124 counter->pmu = NULL;
4125 counter->ctx = ctx;
4126 counter->oncpu = -1;
Ingo Molnar329d8762009-05-26 08:10:00 +02004127
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004128 counter->parent = parent_counter;
4129
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004130 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
4131 counter->id = atomic64_inc_return(&perf_counter_id);
4132
4133 counter->state = PERF_COUNTER_STATE_INACTIVE;
4134
Peter Zijlstra0d486962009-06-02 19:22:16 +02004135 if (attr->disabled)
Ingo Molnara86ed502008-12-17 00:43:10 +01004136 counter->state = PERF_COUNTER_STATE_OFF;
4137
Robert Richter4aeb0b42009-04-29 12:47:03 +02004138 pmu = NULL;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004139
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004140 hwc = &counter->hw;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004141 hwc->sample_period = attr->sample_period;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004142 if (attr->freq && attr->sample_freq)
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004143 hwc->sample_period = 1;
Peter Zijlstraeced1df2009-08-28 17:10:47 +02004144 hwc->last_period = hwc->sample_period;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004145
4146 atomic64_set(&hwc->period_left, hwc->sample_period);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004147
Peter Zijlstra2023b352009-05-05 17:50:26 +02004148 /*
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02004149 * we currently do not support PERF_FORMAT_GROUP on inherited counters
Peter Zijlstra2023b352009-05-05 17:50:26 +02004150 */
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02004151 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
Peter Zijlstra2023b352009-05-05 17:50:26 +02004152 goto done;
4153
Ingo Molnara21ca2c2009-06-06 09:58:57 +02004154 switch (attr->type) {
Peter Zijlstra081fad82009-06-11 17:57:21 +02004155 case PERF_TYPE_RAW:
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004156 case PERF_TYPE_HARDWARE:
Ingo Molnar8326f442009-06-05 20:22:46 +02004157 case PERF_TYPE_HW_CACHE:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004158 pmu = hw_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004159 break;
4160
4161 case PERF_TYPE_SOFTWARE:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004162 pmu = sw_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004163 break;
4164
4165 case PERF_TYPE_TRACEPOINT:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004166 pmu = tp_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004167 break;
Peter Zijlstra974802e2009-06-12 12:46:55 +02004168
4169 default:
4170 break;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004171 }
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01004172done:
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004173 err = 0;
Robert Richter4aeb0b42009-04-29 12:47:03 +02004174 if (!pmu)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004175 err = -EINVAL;
Robert Richter4aeb0b42009-04-29 12:47:03 +02004176 else if (IS_ERR(pmu))
4177 err = PTR_ERR(pmu);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004178
4179 if (err) {
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004180 if (counter->ns)
4181 put_pid_ns(counter->ns);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004182 kfree(counter);
4183 return ERR_PTR(err);
4184 }
4185
Robert Richter4aeb0b42009-04-29 12:47:03 +02004186 counter->pmu = pmu;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004187
Peter Zijlstraf3440112009-06-22 13:58:35 +02004188 if (!counter->parent) {
4189 atomic_inc(&nr_counters);
4190 if (counter->attr.mmap)
4191 atomic_inc(&nr_mmap_counters);
4192 if (counter->attr.comm)
4193 atomic_inc(&nr_comm_counters);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004194 if (counter->attr.task)
4195 atomic_inc(&nr_task_counters);
Peter Zijlstraf3440112009-06-22 13:58:35 +02004196 }
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02004197
Thomas Gleixner0793a612008-12-04 20:12:29 +01004198 return counter;
4199}
4200
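/*
 * Copy a perf_counter_attr from user space, coping with older and newer
 * ABIs: a shorter structure is zero-extended, while a larger one is
 * accepted only if every bit beyond the fields we know about is zero.
 */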
Peter Zijlstra974802e2009-06-12 12:46:55 +02004201static int perf_copy_attr(struct perf_counter_attr __user *uattr,
4202 struct perf_counter_attr *attr)
4203{
4204 int ret;
4205 u32 size;
4206
4207 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4208 return -EFAULT;
4209
4210 /*
4211	 * Zero the full structure, so that a short copy leaves the rest zeroed.
4212 */
4213 memset(attr, 0, sizeof(*attr));
4214
4215 ret = get_user(size, &uattr->size);
4216 if (ret)
4217 return ret;
4218
4219 if (size > PAGE_SIZE) /* silly large */
4220 goto err_size;
4221
4222 if (!size) /* abi compat */
4223 size = PERF_ATTR_SIZE_VER0;
4224
4225 if (size < PERF_ATTR_SIZE_VER0)
4226 goto err_size;
4227
4228 /*
4229 * If we're handed a bigger struct than we know of,
4230 * ensure all the unknown bits are 0.
4231 */
4232 if (size > sizeof(*attr)) {
4233 unsigned long val;
4234 unsigned long __user *addr;
4235 unsigned long __user *end;
4236
4237 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
4238 sizeof(unsigned long));
4239 end = PTR_ALIGN((void __user *)uattr + size,
4240 sizeof(unsigned long));
4241
4242		for (; addr < end; addr++) {
4243 ret = get_user(val, addr);
4244 if (ret)
4245 return ret;
4246 if (val)
4247 goto err_size;
4248 }
4249 }
4250
4251 ret = copy_from_user(attr, uattr, size);
4252 if (ret)
4253 return -EFAULT;
4254
4255 /*
4256	 * If the type exists, the counter-creation code for that type
4257	 * will verify attr->config.
4258 */
4259 if (attr->type >= PERF_TYPE_MAX)
4260 return -EINVAL;
4261
4262 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
4263 return -EINVAL;
4264
4265 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4266 return -EINVAL;
4267
4268 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4269 return -EINVAL;
4270
4271out:
4272 return ret;
4273
4274err_size:
4275 put_user(sizeof(*attr), &uattr->size);
4276 ret = -E2BIG;
4277 goto out;
4278}
4279
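/*
 * Redirect this counter's sample output into the buffer of the counter
 * behind output_fd; an output_fd of 0 removes any existing redirection.
 * The target must itself be a perf counter fd, must not redirect its own
 * output, and this counter must not already have an mmap()ed buffer.
 */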
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004280int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4281{
4282 struct perf_counter *output_counter = NULL;
4283 struct file *output_file = NULL;
4284 struct perf_counter *old_output;
4285 int fput_needed = 0;
4286 int ret = -EINVAL;
4287
4288 if (!output_fd)
4289 goto set;
4290
4291 output_file = fget_light(output_fd, &fput_needed);
4292 if (!output_file)
4293 return -EBADF;
4294
4295 if (output_file->f_op != &perf_fops)
4296 goto out;
4297
4298 output_counter = output_file->private_data;
4299
4300 /* Don't chain output fds */
4301 if (output_counter->output)
4302 goto out;
4303
4304 /* Don't set an output fd when we already have an output channel */
4305 if (counter->data)
4306 goto out;
4307
4308 atomic_long_inc(&output_file->f_count);
4309
4310set:
4311 mutex_lock(&counter->mmap_mutex);
4312 old_output = counter->output;
4313 rcu_assign_pointer(counter->output, output_counter);
4314 mutex_unlock(&counter->mmap_mutex);
4315
4316 if (old_output) {
4317 /*
4318 * we need to make sure no existing perf_output_*()
4319 * is still referencing this counter.
4320 */
4321 synchronize_rcu();
4322 fput(old_output->filp);
4323 }
4324
4325 ret = 0;
4326out:
4327 fput_light(output_file, fput_needed);
4328 return ret;
4329}
4330
Thomas Gleixner0793a612008-12-04 20:12:29 +01004331/**
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004332 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
Ingo Molnar9f66a382008-12-10 12:33:23 +01004333 *
Peter Zijlstra0d486962009-06-02 19:22:16 +02004334 * @attr_uptr: event type attributes for monitoring/sampling
Thomas Gleixner0793a612008-12-04 20:12:29 +01004335 * @pid: target pid
Ingo Molnar9f66a382008-12-10 12:33:23 +01004336 * @cpu: target cpu
4337 * @group_fd: group leader counter fd
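 * @flags: PERF_FLAG_FD_NO_GROUP / PERF_FLAG_FD_OUTPUT behaviour flags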
Thomas Gleixner0793a612008-12-04 20:12:29 +01004338 */
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004339SYSCALL_DEFINE5(perf_counter_open,
Peter Zijlstra974802e2009-06-12 12:46:55 +02004340 struct perf_counter_attr __user *, attr_uptr,
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004341 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004342{
Ingo Molnar04289bb2008-12-11 08:38:42 +01004343 struct perf_counter *counter, *group_leader;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004344 struct perf_counter_attr attr;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004345 struct perf_counter_context *ctx;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004346 struct file *counter_file = NULL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004347 struct file *group_file = NULL;
4348 int fput_needed = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004349 int fput_needed2 = 0;
Ingo Molnardc86cab2009-09-03 18:03:00 +02004350 int err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004351
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004352 /* for future expandability... */
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004353 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004354 return -EINVAL;
4355
Ingo Molnardc86cab2009-09-03 18:03:00 +02004356 err = perf_copy_attr(attr_uptr, &attr);
4357 if (err)
4358 return err;
Thomas Gleixnereab656a2008-12-08 19:26:59 +01004359
Peter Zijlstra07647712009-06-11 11:18:36 +02004360 if (!attr.exclude_kernel) {
4361 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4362 return -EACCES;
4363 }
4364
Peter Zijlstradf58ab22009-06-11 11:25:05 +02004365 if (attr.freq) {
4366 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
4367 return -EINVAL;
4368 }
4369
Ingo Molnar04289bb2008-12-11 08:38:42 +01004370 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01004371 * Get the target context (task or percpu):
4372 */
4373 ctx = find_get_context(pid, cpu);
4374 if (IS_ERR(ctx))
4375 return PTR_ERR(ctx);
4376
4377 /*
4378 * Look up the group leader (we will attach this counter to it):
Ingo Molnar04289bb2008-12-11 08:38:42 +01004379 */
4380 group_leader = NULL;
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004381 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
Ingo Molnardc86cab2009-09-03 18:03:00 +02004382 err = -EINVAL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004383 group_file = fget_light(group_fd, &fput_needed);
4384 if (!group_file)
Ingo Molnarccff2862008-12-11 11:26:29 +01004385 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004386 if (group_file->f_op != &perf_fops)
Ingo Molnarccff2862008-12-11 11:26:29 +01004387 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004388
4389 group_leader = group_file->private_data;
4390 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01004391		 * Do not allow a recursive hierarchy: the group leader must
4392		 * itself be a leader, not a sibling of another group:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004393 */
Ingo Molnarccff2862008-12-11 11:26:29 +01004394 if (group_leader->group_leader != group_leader)
4395 goto err_put_context;
4396 /*
4397 * Do not allow to attach to a group in a different
4398 * task or CPU context:
4399 */
4400 if (group_leader->ctx != ctx)
4401 goto err_put_context;
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11004402 /*
4403 * Only a group leader can be exclusive or pinned
4404 */
Peter Zijlstra0d486962009-06-02 19:22:16 +02004405 if (attr.exclusive || attr.pinned)
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11004406 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004407 }
4408
Peter Zijlstra0d486962009-06-02 19:22:16 +02004409 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004410 NULL, GFP_KERNEL);
Ingo Molnardc86cab2009-09-03 18:03:00 +02004411 err = PTR_ERR(counter);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004412 if (IS_ERR(counter))
Thomas Gleixner0793a612008-12-04 20:12:29 +01004413 goto err_put_context;
4414
Ingo Molnardc86cab2009-09-03 18:03:00 +02004415 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
4416 if (err < 0)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004417 goto err_free_put_context;
4418
Ingo Molnardc86cab2009-09-03 18:03:00 +02004419 counter_file = fget_light(err, &fput_needed2);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004420 if (!counter_file)
4421 goto err_free_put_context;
4422
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004423 if (flags & PERF_FLAG_FD_OUTPUT) {
Ingo Molnardc86cab2009-09-03 18:03:00 +02004424 err = perf_counter_set_output(counter, group_fd);
4425 if (err)
4426 goto err_fput_free_put_context;
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004427 }
4428
Ingo Molnar9b51f662008-12-12 13:49:45 +01004429 counter->filp = counter_file;
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004430 WARN_ON_ONCE(ctx->parent_ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004431 mutex_lock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004432 perf_install_in_context(ctx, counter, cpu);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004433 ++ctx->generation;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004434 mutex_unlock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004435
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02004436 counter->owner = current;
4437 get_task_struct(current);
4438 mutex_lock(&current->perf_counter_mutex);
4439 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
4440 mutex_unlock(&current->perf_counter_mutex);
4441
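	/*
	 * On success we fall through the labels below with err holding the
	 * new fd (>= 0); the err < 0 cleanups are then skipped and only the
	 * fput_light() calls run before err is returned.
	 */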
Ingo Molnardc86cab2009-09-03 18:03:00 +02004442err_fput_free_put_context:
Ingo Molnar9b51f662008-12-12 13:49:45 +01004443 fput_light(counter_file, fput_needed2);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004444
Ingo Molnar9b51f662008-12-12 13:49:45 +01004445err_free_put_context:
Ingo Molnardc86cab2009-09-03 18:03:00 +02004446 if (err < 0)
4447 kfree(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004448
4449err_put_context:
Ingo Molnardc86cab2009-09-03 18:03:00 +02004450 if (err < 0)
4451 put_ctx(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004452
Ingo Molnardc86cab2009-09-03 18:03:00 +02004453 fput_light(group_file, fput_needed);
4454
4455 return err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004456}
4457
Ingo Molnar9b51f662008-12-12 13:49:45 +01004458/*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004459 * inherit a counter from parent task to child task:
4460 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004461static struct perf_counter *
Ingo Molnar9b51f662008-12-12 13:49:45 +01004462inherit_counter(struct perf_counter *parent_counter,
4463 struct task_struct *parent,
4464 struct perf_counter_context *parent_ctx,
4465 struct task_struct *child,
Paul Mackerrasd859e292009-01-17 18:10:22 +11004466 struct perf_counter *group_leader,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004467 struct perf_counter_context *child_ctx)
4468{
4469 struct perf_counter *child_counter;
4470
Paul Mackerrasd859e292009-01-17 18:10:22 +11004471 /*
4472 * Instead of creating recursive hierarchies of counters,
4473 * we link inherited counters back to the original parent,
4474	 * which is guaranteed to have a filp that we use as the
4475	 * reference count:
4476 */
4477 if (parent_counter->parent)
4478 parent_counter = parent_counter->parent;
4479
Peter Zijlstra0d486962009-06-02 19:22:16 +02004480 child_counter = perf_counter_alloc(&parent_counter->attr,
Paul Mackerras23a185c2009-02-09 22:42:47 +11004481 parent_counter->cpu, child_ctx,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004482 group_leader, parent_counter,
4483 GFP_KERNEL);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004484 if (IS_ERR(child_counter))
4485 return child_counter;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004486 get_ctx(child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004487
4488 /*
Paul Mackerras564c2b22009-05-22 14:27:22 +10004489 * Make the child state follow the state of the parent counter,
Peter Zijlstra0d486962009-06-02 19:22:16 +02004490 * not its attr.disabled bit. We hold the parent's mutex,
Ingo Molnar22a4f652009-06-01 10:13:37 +02004491 * so we won't race with perf_counter_{en, dis}able_family.
Paul Mackerras564c2b22009-05-22 14:27:22 +10004492 */
4493 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
4494 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
4495 else
4496 child_counter->state = PERF_COUNTER_STATE_OFF;
4497
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004498 if (parent_counter->attr.freq)
4499 child_counter->hw.sample_period = parent_counter->hw.sample_period;
4500
Paul Mackerras564c2b22009-05-22 14:27:22 +10004501 /*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004502 * Link it up in the child's context:
4503 */
Paul Mackerras53cfbf52009-03-25 22:46:58 +11004504 add_counter_to_ctx(child_counter, child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004505
Ingo Molnar9b51f662008-12-12 13:49:45 +01004506 /*
4507 * Get a reference to the parent filp - we will fput it
4508 * when the child counter exits. This is safe to do because
4509 * we are in the parent and we know that the filp still
4510 * exists and has a nonzero count:
4511 */
4512 atomic_long_inc(&parent_counter->filp->f_count);
4513
Paul Mackerrasd859e292009-01-17 18:10:22 +11004514 /*
4515 * Link this into the parent counter's child list
4516 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004517 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004518 mutex_lock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004519 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004520 mutex_unlock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004521
4522 return child_counter;
4523}
4524
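/*
 * Inherit a whole counter group into the child context: clone the group
 * leader first, then clone each sibling with the new leader as its group.
 */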
4525static int inherit_group(struct perf_counter *parent_counter,
4526 struct task_struct *parent,
4527 struct perf_counter_context *parent_ctx,
4528 struct task_struct *child,
4529 struct perf_counter_context *child_ctx)
4530{
4531 struct perf_counter *leader;
4532 struct perf_counter *sub;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004533 struct perf_counter *child_ctr;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004534
4535 leader = inherit_counter(parent_counter, parent, parent_ctx,
4536 child, NULL, child_ctx);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004537 if (IS_ERR(leader))
4538 return PTR_ERR(leader);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004539 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004540 child_ctr = inherit_counter(sub, parent, parent_ctx,
4541 child, leader, child_ctx);
4542 if (IS_ERR(child_ctr))
4543 return PTR_ERR(child_ctr);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004544 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004545 return 0;
4546}
4547
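/*
 * Fold an exiting child counter's count and times back into its parent
 * counter and unlink it from the parent's child list.
 */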
Paul Mackerrasd859e292009-01-17 18:10:22 +11004548static void sync_child_counter(struct perf_counter *child_counter,
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004549 struct task_struct *child)
Paul Mackerrasd859e292009-01-17 18:10:22 +11004550{
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004551 struct perf_counter *parent_counter = child_counter->parent;
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004552 u64 child_val;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004553
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02004554 if (child_counter->attr.inherit_stat)
4555 perf_counter_read_event(child_counter, child);
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004556
Paul Mackerrasd859e292009-01-17 18:10:22 +11004557 child_val = atomic64_read(&child_counter->count);
4558
4559 /*
4560 * Add back the child's count to the parent's count:
4561 */
4562 atomic64_add(child_val, &parent_counter->count);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11004563 atomic64_add(child_counter->total_time_enabled,
4564 &parent_counter->child_total_time_enabled);
4565 atomic64_add(child_counter->total_time_running,
4566 &parent_counter->child_total_time_running);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004567
4568 /*
4569 * Remove this counter from the parent's list
4570 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004571 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004572 mutex_lock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004573 list_del_init(&child_counter->child_list);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004574 mutex_unlock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004575
4576 /*
4577 * Release the parent counter, if this was the last
4578 * reference to it.
4579 */
4580 fput(parent_counter->filp);
4581}
4582
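/*
 * Remove a child counter from its context on task exit; if the parent
 * counter still exists, sync the child's counts into it and free the
 * child counter.
 */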
Ingo Molnar9b51f662008-12-12 13:49:45 +01004583static void
Peter Zijlstrabbbee902009-05-29 14:25:58 +02004584__perf_counter_exit_task(struct perf_counter *child_counter,
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004585 struct perf_counter_context *child_ctx,
4586 struct task_struct *child)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004587{
4588 struct perf_counter *parent_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004589
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004590 update_counter_times(child_counter);
Peter Zijlstraaa9c67f2009-05-23 18:28:59 +02004591 perf_counter_remove_from_context(child_counter);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01004592
Ingo Molnar9b51f662008-12-12 13:49:45 +01004593 parent_counter = child_counter->parent;
4594 /*
4595 * It can happen that parent exits first, and has counters
4596 * that are still around due to the child reference. These
4597 * counters need to be zapped - but otherwise linger.
4598 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004599 if (parent_counter) {
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004600 sync_child_counter(child_counter, child);
Peter Zijlstraf1600952009-03-19 20:26:16 +01004601 free_counter(child_counter);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01004602 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004603}
4604
4605/*
Paul Mackerrasd859e292009-01-17 18:10:22 +11004606 * When a child task exits, feed back counter values to parent counters.
Ingo Molnar9b51f662008-12-12 13:49:45 +01004607 */
4608void perf_counter_exit_task(struct task_struct *child)
4609{
4610 struct perf_counter *child_counter, *tmp;
4611 struct perf_counter_context *child_ctx;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004612 unsigned long flags;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004613
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004614 if (likely(!child->perf_counter_ctxp)) {
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004615 perf_counter_task(child, NULL, 0);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004616 return;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004617 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004618
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004619 local_irq_save(flags);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004620 /*
4621 * We can't reschedule here because interrupts are disabled,
4622 * and either child is current or it is a task that can't be
4623 * scheduled, so we are now safe from rescheduling changing
4624 * our context.
4625 */
4626 child_ctx = child->perf_counter_ctxp;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004627 __perf_counter_task_sched_out(child_ctx);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004628
4629 /*
4630 * Take the context lock here so that if find_get_context is
4631 * reading child->perf_counter_ctxp, we wait until it has
4632 * incremented the context's refcount before we do put_ctx below.
4633 */
4634 spin_lock(&child_ctx->lock);
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004635 child->perf_counter_ctxp = NULL;
Peter Zijlstra71a851b2009-07-10 09:06:56 +02004636 /*
4637 * If this context is a clone; unclone it so it can't get
4638 * swapped to another process while we're removing all
4639 * the counters from it.
4640 */
4641 unclone_ctx(child_ctx);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004642 spin_unlock_irqrestore(&child_ctx->lock, flags);
4643
4644 /*
4645 * Report the task dead after unscheduling the counters so that we
4646 * won't get any samples after PERF_EVENT_EXIT. We can however still
4647 * get a few PERF_EVENT_READ events.
4648 */
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004649 perf_counter_task(child, child_ctx, 0);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004650
Peter Zijlstra66fff222009-06-10 22:53:37 +02004651 /*
4652 * We can recurse on the same lock type through:
4653 *
4654 * __perf_counter_exit_task()
4655 * sync_child_counter()
4656 * fput(parent_counter->filp)
4657 * perf_release()
4658 * mutex_lock(&ctx->mutex)
4659 *
4660	 * But since it's the parent context it won't be the same instance.
4661 */
4662 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004663
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004664again:
Ingo Molnar9b51f662008-12-12 13:49:45 +01004665 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4666 list_entry)
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004667 __perf_counter_exit_task(child_counter, child_ctx, child);
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004668
4669 /*
4670 * If the last counter was a group counter, it will have appended all
4671 * its siblings to the list, but we obtained 'tmp' before that which
4672 * will still point to the list head terminating the iteration.
4673 */
4674 if (!list_empty(&child_ctx->counter_list))
4675 goto again;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004676
4677 mutex_unlock(&child_ctx->mutex);
4678
4679 put_ctx(child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004680}
4681
4682/*
Peter Zijlstrabbbee902009-05-29 14:25:58 +02004683 * Free an unexposed, unused context, as created by the inheritance
4684 * code in perf_counter_init_task() below; used by fork() on its failure path.
4685 */
4686void perf_counter_free_task(struct task_struct *task)
4687{
4688 struct perf_counter_context *ctx = task->perf_counter_ctxp;
4689 struct perf_counter *counter, *tmp;
4690
4691 if (!ctx)
4692 return;
4693
4694 mutex_lock(&ctx->mutex);
4695again:
4696 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4697 struct perf_counter *parent = counter->parent;
4698
4699 if (WARN_ON_ONCE(!parent))
4700 continue;
4701
4702 mutex_lock(&parent->child_mutex);
4703 list_del_init(&counter->child_list);
4704 mutex_unlock(&parent->child_mutex);
4705
4706 fput(parent->filp);
4707
4708 list_del_counter(counter, ctx);
4709 free_counter(counter);
4710 }
4711
4712 if (!list_empty(&ctx->counter_list))
4713 goto again;
4714
4715 mutex_unlock(&ctx->mutex);
4716
4717 put_ctx(ctx);
4718}
4719
4720/*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004721 * Initialize the perf_counter context in task_struct
4722 */
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004723int perf_counter_init_task(struct task_struct *child)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004724{
4725 struct perf_counter_context *child_ctx, *parent_ctx;
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004726 struct perf_counter_context *cloned_ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004727 struct perf_counter *counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004728 struct task_struct *parent = current;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004729 int inherited_all = 1;
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004730 int ret = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004731
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004732 child->perf_counter_ctxp = NULL;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004733
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02004734 mutex_init(&child->perf_counter_mutex);
4735 INIT_LIST_HEAD(&child->perf_counter_list);
4736
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004737 if (likely(!parent->perf_counter_ctxp))
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004738 return 0;
4739
Ingo Molnar9b51f662008-12-12 13:49:45 +01004740 /*
4741 * This is executed from the parent task context, so inherit
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004742 * counters that have been marked for cloning.
4743 * First allocate and initialize a context for the child.
Ingo Molnar9b51f662008-12-12 13:49:45 +01004744 */
4745
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004746 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4747 if (!child_ctx)
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004748 return -ENOMEM;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004749
4750 __perf_counter_init_context(child_ctx, child);
4751 child->perf_counter_ctxp = child_ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004752 get_task_struct(child);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004753
Ingo Molnar9b51f662008-12-12 13:49:45 +01004754 /*
Paul Mackerras25346b932009-06-01 17:48:12 +10004755 * If the parent's context is a clone, pin it so it won't get
4756 * swapped under us.
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004757 */
Paul Mackerras25346b932009-06-01 17:48:12 +10004758 parent_ctx = perf_pin_task_context(parent);
4759
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004760 /*
4761 * No need to check if parent_ctx != NULL here; since we saw
4762 * it non-NULL earlier, the only reason for it to become NULL
4763 * is if we exit, and since we're currently in the middle of
4764 * a fork we can't be exiting at the same time.
4765 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004766
4767 /*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004768 * Lock the parent list. No need to lock the child - not PID
4769 * hashed yet and not running, so nobody can access it.
4770 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004771 mutex_lock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004772
4773 /*
4774	 * We don't have to disable NMIs - we are only looking at
4775 * the list, not manipulating it:
4776 */
Peter Zijlstrad7b629a2009-05-20 12:21:19 +02004777 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4778 if (counter != counter->group_leader)
4779 continue;
4780
Peter Zijlstra0d486962009-06-02 19:22:16 +02004781 if (!counter->attr.inherit) {
Paul Mackerras564c2b22009-05-22 14:27:22 +10004782 inherited_all = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004783 continue;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004784 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004785
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004786 ret = inherit_group(counter, parent, parent_ctx,
4787 child, child_ctx);
4788 if (ret) {
Paul Mackerras564c2b22009-05-22 14:27:22 +10004789 inherited_all = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004790 break;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004791 }
4792 }
4793
4794 if (inherited_all) {
4795 /*
4796 * Mark the child context as a clone of the parent
4797 * context, or of whatever the parent is a clone of.
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004798 * Note that if the parent is a clone, it could get
4799 * uncloned at any point, but that doesn't matter
4800 * because the list of counters and the generation
4801 * count can't have changed since we took the mutex.
Paul Mackerras564c2b22009-05-22 14:27:22 +10004802 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004803 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4804 if (cloned_ctx) {
4805 child_ctx->parent_ctx = cloned_ctx;
Paul Mackerras25346b932009-06-01 17:48:12 +10004806 child_ctx->parent_gen = parent_ctx->parent_gen;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004807 } else {
4808 child_ctx->parent_ctx = parent_ctx;
4809 child_ctx->parent_gen = parent_ctx->generation;
4810 }
4811 get_ctx(child_ctx->parent_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004812 }
4813
Paul Mackerrasd859e292009-01-17 18:10:22 +11004814 mutex_unlock(&parent_ctx->mutex);
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004815
Paul Mackerras25346b932009-06-01 17:48:12 +10004816 perf_unpin_context(parent_ctx);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004817
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004818 return ret;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004819}
4820
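/*
 * Set up the per-CPU context when a CPU comes up: initialise its counter
 * context, recompute how many counters a task may use on this CPU, and
 * let the architecture code do its per-CPU setup.
 */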
Ingo Molnar04289bb2008-12-11 08:38:42 +01004821static void __cpuinit perf_counter_init_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004822{
Ingo Molnar04289bb2008-12-11 08:38:42 +01004823 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004824
Ingo Molnar04289bb2008-12-11 08:38:42 +01004825 cpuctx = &per_cpu(perf_cpu_context, cpu);
4826 __perf_counter_init_context(&cpuctx->ctx, NULL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004827
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004828 spin_lock(&perf_resource_lock);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004829 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004830 spin_unlock(&perf_resource_lock);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004831
Paul Mackerras01d02872009-01-14 13:44:19 +11004832 hw_perf_counter_setup(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004833}
4834
4835#ifdef CONFIG_HOTPLUG_CPU
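/*
 * Tear down a CPU's context when it goes offline: remove every counter
 * from the context, running on the CPU itself via smp_call_function_single().
 */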
Ingo Molnar04289bb2008-12-11 08:38:42 +01004836static void __perf_counter_exit_cpu(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004837{
4838 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4839 struct perf_counter_context *ctx = &cpuctx->ctx;
4840 struct perf_counter *counter, *tmp;
4841
Ingo Molnar04289bb2008-12-11 08:38:42 +01004842 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4843 __perf_counter_remove_from_context(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004844}
Ingo Molnar04289bb2008-12-11 08:38:42 +01004845static void perf_counter_exit_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004846{
Paul Mackerrasd859e292009-01-17 18:10:22 +11004847 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4848 struct perf_counter_context *ctx = &cpuctx->ctx;
4849
4850 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004851 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004852 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004853}
4854#else
Ingo Molnar04289bb2008-12-11 08:38:42 +01004855static inline void perf_counter_exit_cpu(int cpu) { }
Thomas Gleixner0793a612008-12-04 20:12:29 +01004856#endif
4857
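/*
 * CPU hotplug callback: initialise the per-CPU context before a CPU comes
 * up, give the architecture code a chance once it is online, and tear the
 * context down before the CPU is taken offline.
 */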
4858static int __cpuinit
4859perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4860{
4861 unsigned int cpu = (long)hcpu;
4862
4863 switch (action) {
4864
4865 case CPU_UP_PREPARE:
4866 case CPU_UP_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004867 perf_counter_init_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004868 break;
4869
Ingo Molnar28402972009-08-13 10:13:22 +02004870 case CPU_ONLINE:
4871 case CPU_ONLINE_FROZEN:
4872 hw_perf_counter_setup_online(cpu);
4873 break;
4874
Thomas Gleixner0793a612008-12-04 20:12:29 +01004875 case CPU_DOWN_PREPARE:
4876 case CPU_DOWN_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004877 perf_counter_exit_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004878 break;
4879
4880 default:
4881 break;
4882 }
4883
4884 return NOTIFY_OK;
4885}
4886
Paul Mackerrasf38b0822009-06-02 21:05:16 +10004887/*
4888 * This has to have a higher priority than migration_notifier in sched.c.
4889 */
Thomas Gleixner0793a612008-12-04 20:12:29 +01004890static struct notifier_block __cpuinitdata perf_cpu_nb = {
4891 .notifier_call = perf_cpu_notify,
Paul Mackerrasf38b0822009-06-02 21:05:16 +10004892 .priority = 20,
Thomas Gleixner0793a612008-12-04 20:12:29 +01004893};
4894
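/*
 * Called early at boot: set up the boot CPU's context as if it had just
 * come online, then register the hotplug notifier for the other CPUs.
 */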
Ingo Molnar0d905bc2009-05-04 19:13:30 +02004895void __init perf_counter_init(void)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004896{
4897 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4898 (void *)(long)smp_processor_id());
Ingo Molnar28402972009-08-13 10:13:22 +02004899 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4900 (void *)(long)smp_processor_id());
Thomas Gleixner0793a612008-12-04 20:12:29 +01004901 register_cpu_notifier(&perf_cpu_nb);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004902}
Thomas Gleixner0793a612008-12-04 20:12:29 +01004903
4904static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4905{
4906 return sprintf(buf, "%d\n", perf_reserved_percpu);
4907}
4908
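/*
 * Sysfs write handler: reserve 'val' counters per CPU for system-wide
 * (per-CPU) use and shrink each online CPU's per-task budget accordingly.
 */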
4909static ssize_t
4910perf_set_reserve_percpu(struct sysdev_class *class,
4911 const char *buf,
4912 size_t count)
4913{
4914 struct perf_cpu_context *cpuctx;
4915 unsigned long val;
4916 int err, cpu, mpt;
4917
4918 err = strict_strtoul(buf, 10, &val);
4919 if (err)
4920 return err;
4921 if (val > perf_max_counters)
4922 return -EINVAL;
4923
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004924 spin_lock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004925 perf_reserved_percpu = val;
4926 for_each_online_cpu(cpu) {
4927 cpuctx = &per_cpu(perf_cpu_context, cpu);
4928 spin_lock_irq(&cpuctx->ctx.lock);
4929 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4930 perf_max_counters - perf_reserved_percpu);
4931 cpuctx->max_pertask = mpt;
4932 spin_unlock_irq(&cpuctx->ctx.lock);
4933 }
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004934 spin_unlock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004935
4936 return count;
4937}
4938
4939static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4940{
4941 return sprintf(buf, "%d\n", perf_overcommit);
4942}
4943
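/*
 * Sysfs write handler: accept 0 or 1 and update perf_overcommit under
 * perf_resource_lock.
 */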
4944static ssize_t
4945perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4946{
4947 unsigned long val;
4948 int err;
4949
4950 err = strict_strtoul(buf, 10, &val);
4951 if (err)
4952 return err;
4953 if (val > 1)
4954 return -EINVAL;
4955
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004956 spin_lock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004957 perf_overcommit = val;
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004958 spin_unlock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004959
4960 return count;
4961}
4962
4963static SYSDEV_CLASS_ATTR(
4964 reserve_percpu,
4965 0644,
4966 perf_show_reserve_percpu,
4967 perf_set_reserve_percpu
4968 );
4969
4970static SYSDEV_CLASS_ATTR(
4971 overcommit,
4972 0644,
4973 perf_show_overcommit,
4974 perf_set_overcommit
4975 );
4976
4977static struct attribute *perfclass_attrs[] = {
4978 &attr_reserve_percpu.attr,
4979 &attr_overcommit.attr,
4980 NULL
4981};
4982
4983static struct attribute_group perfclass_attr_group = {
4984 .attrs = perfclass_attrs,
4985 .name = "perf_counters",
4986};
4987
4988static int __init perf_counter_sysfs_init(void)
4989{
4990 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4991 &perfclass_attr_group);
4992}
4993device_initcall(perf_counter_sysfs_init);