/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
static atomic_t nr_task_counters __read_mostly;

/*
 * perf counter paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu counters for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_counter_paranoid __read_mostly = 1;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_counter_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_counter_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_counter_paranoid > 1;
}

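/*
 * Illustrative note (not part of the original file): the paranoia level
 * is a sysctl, so an administrator can tighten or relax it at runtime,
 * e.g. (assuming the usual proc path for this kernel version):
 *
 *	# echo 2  > /proc/sys/kernel/perf_counter_paranoid
 *	# echo -1 > /proc/sys/kernel/perf_counter_paranoid
 *
 * The three helpers above are then the only places that interpret the
 * level.
 */
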
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf counter sample rate
 */
int sysctl_perf_counter_sample_rate __read_mostly = 100000;

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

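/*
 * Illustrative sketch (not part of the original file): the per-cpu
 * disable_count makes these calls nest, so a caller that disables the
 * PMU around a list update stays safe even if a deeper layer does the
 * same:
 *
 *	perf_disable();		// count 0 -> 1, hw_perf_disable()
 *	  perf_disable();	// count 1 -> 2, hw already off
 *	  ...
 *	  perf_enable();	// count 2 -> 1, hw stays off
 *	perf_enable();		// count 1 -> 0, hw_perf_enable()
 *
 * Only the outermost perf_enable() re-enables the hardware.
 */
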
static void get_ctx(struct perf_counter_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_counter_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

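/*
 * Illustrative note (not part of the original file): contexts are freed
 * via call_rcu() because lockless readers may still be walking
 * ctx->event_list under rcu_read_lock() when the last reference drops.
 * A reader therefore follows the pattern:
 *
 *	rcu_read_lock();
 *	ctx = rcu_dereference(task->perf_counter_ctxp);
 *	if (ctx && atomic_inc_not_zero(&ctx->refcount))
 *		... ctx cannot be freed under us ...
 *	rcu_read_unlock();
 *
 * which is exactly what perf_lock_task_context() below implements.
 */
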
/*
 * If we inherit counters we want to return the parent counter id
 * to userspace.
 */
static u64 primary_counter_id(struct perf_counter *counter)
{
	u64 id = counter->id;

	if (counter->parent)
		id = counter->parent->id;

	return id;
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

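/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs a stable context across a blocking operation pairs the two
 * helpers above:
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		... ctx can be neither swapped nor freed here ...
 *		perf_unpin_context(ctx);
 *	}
 *
 * Note that perf_unpin_context() also drops the reference that
 * perf_pin_task_context() took via perf_lock_task_context().
 */
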
/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
	if (counter->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;
	if (counter->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (counter->pending_disable) {
		counter->pending_disable = 0;
		counter->state = PERF_COUNTER_STATE_OFF;
	}
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

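/*
 * Illustrative note (not part of the original file): the retry loop
 * above is the generic pattern this file uses for modifying a task
 * context:
 *
 *	1. fire the on-task-cpu (or cross-cpu) call optimistically;
 *	2. take ctx->lock and check whether the call actually ran
 *	   (here: whether the counter is still on an active context);
 *	3. if it raced with a context switch, drop the lock and retry;
 *	4. otherwise finish the operation directly under ctx->lock,
 *	   which keeps the context from being scheduled back in.
 *
 * perf_counter_disable() and perf_install_in_context() below follow
 * the same shape.
 */
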
static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

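/*
 * Illustrative worked example (not part of the original file), with
 * made-up context times: a counter added at ctx->time == 100 that was
 * scheduled in at 120 and out at 150, then read at ctx->time == 200
 * while INACTIVE, reports
 *
 *	total_time_enabled = 200 - 100 = 100
 *	total_time_running = 150 - 120 =  30
 *
 * Userspace can scale the raw count by enabled/running to estimate
 * what it would have been with exclusive use of the PMU.
 */
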
/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_group_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

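/*
 * Illustrative summary (not part of the original file) of the checks
 * above, in the order they are applied:
 *
 *	software-only group			-> always goes on
 *	exclusive group already on the CPU	-> nothing else goes on
 *	this group exclusive, CPU not idle	-> does not go on
 *	otherwise				-> whatever can_add_hw says
 *
 * can_add_hw is cleared by the schedulers below once one hardware
 * group fails to fit, so groups keep their relative order.
 */
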
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Put a counter into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_counter_mark_enabled(struct perf_counter *counter,
					struct perf_counter_context *ctx)
{
	struct perf_counter *sub;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
	list_for_each_entry(sub, &counter->sibling_list, list_entry)
		if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	__perf_counter_mark_enabled(counter, ctx);

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

 unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF)
		__perf_counter_mark_enabled(counter, ctx);

 out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

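/*
 * Illustrative sketch (not part of the original file): userspace
 * reaches this through the counter fd's ioctl interface, e.g. to
 * re-arm a counter that disables itself after 'n' overflow events
 * (assuming the PERF_COUNTER_IOC_REFRESH ioctl of this ABI version):
 *
 *	ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, n);
 *
 * Each overflow then consumes one unit of event_limit, and the counter
 * is disabled once the limit reaches zero.
 */
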
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_counter_read(void *counter);

static void __perf_counter_sync_stat(struct perf_counter *counter,
				     struct perf_counter *next_counter)
{
	u64 value;

	if (!counter->attr.inherit_stat)
		return;

	/*
	 * Update the counter value, we cannot use perf_counter_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the counter must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (counter->state) {
	case PERF_COUNTER_STATE_ACTIVE:
		__perf_counter_read(counter);
		break;

	case PERF_COUNTER_STATE_INACTIVE:
		update_counter_times(counter);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the counter
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_counter->count);
	value = atomic64_xchg(&counter->count, value);
	atomic64_set(&next_counter->count, value);

	swap(counter->total_time_enabled, next_counter->total_time_enabled);
	swap(counter->total_time_running, next_counter->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_counter_update_userpage(counter);
	perf_counter_update_userpage(next_counter);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_counter_sync_stat(struct perf_counter_context *ctx,
				   struct perf_counter_context *next_ctx)
{
	struct perf_counter *counter, *next_counter;

	if (!ctx->nr_stat)
		return;

	counter = list_first_entry(&ctx->event_list,
				   struct perf_counter, event_entry);

	next_counter = list_first_entry(&next_ctx->event_list,
					struct perf_counter, event_entry);

	while (&counter->event_entry != &ctx->event_list &&
	       &next_counter->event_entry != &next_ctx->event_list) {

		__perf_counter_sync_stat(counter, next_counter);

		counter = list_next_entry(counter, event_entry);
		next_counter = list_next_entry(next_counter, event_entry);
	}
}

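/*
 * Illustrative note (not part of the original file): the value swap in
 * __perf_counter_sync_stat() relies on the two contexts being clones,
 * so their event lists pair up positionally. After the xchg/swap, each
 * task keeps the counts it accumulated even though the contexts
 * themselves are exchanged underneath it by the optimized context
 * switch below.
 */
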
/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_counter_sync_stat(ctx, next_ctx);
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->attr.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

Peter Zijlstraa78ac322009-05-25 17:39:05 +02001341#define MAX_INTERRUPTS (~0ULL)
1342
1343static void perf_log_throttle(struct perf_counter *counter, int enable);
Peter Zijlstra26b119b2009-05-20 12:21:20 +02001344
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001345static void perf_adjust_period(struct perf_counter *counter, u64 events)
1346{
1347 struct hw_perf_counter *hwc = &counter->hw;
1348 u64 period, sample_period;
1349 s64 delta;
1350
1351 events *= hwc->sample_period;
1352 period = div64_u64(events, counter->attr.sample_freq);
1353
1354 delta = (s64)(period - hwc->sample_period);
1355	delta = (delta + 7) / 8; /* low-pass filter */
1356
1357 sample_period = hwc->sample_period + delta;
1358
1359 if (!sample_period)
1360 sample_period = 1;
1361
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001362 hwc->sample_period = sample_period;
1363}
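/*
 * A worked example of the adjustment above (assumed numbers, purely
 * illustrative): with HZ = 1000, attr.sample_freq = 1000 and a current
 * sample_period of 10000, suppose the counter took 2 interrupts during
 * the last tick. perf_ctx_adjust_freq() below then calls us with
 * events = HZ * interrupts = 2000, so:
 *
 *	events        = 2000 * 10000    = 20000000  (est. events/sec)
 *	period        = 20000000 / 1000 = 20000     (target period)
 *	delta         = 20000 - 10000   = 10000
 *	delta         = (10000 + 7) / 8 = 1250
 *	sample_period = 10000 + 1250    = 11250
 *
 * i.e. each invocation moves sample_period 1/8th of the way towards the
 * period that would yield attr.sample_freq samples per second.
 */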
1364
1365static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001366{
1367 struct perf_counter *counter;
Peter Zijlstra6a24ed6c2009-06-05 18:01:29 +02001368 struct hw_perf_counter *hwc;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001369 u64 interrupts, freq;
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001370
1371 spin_lock(&ctx->lock);
1372 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1373 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1374 continue;
1375
Peter Zijlstra6a24ed6c2009-06-05 18:01:29 +02001376 hwc = &counter->hw;
1377
1378 interrupts = hwc->interrupts;
1379 hwc->interrupts = 0;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001380
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001381 /*
1382 * unthrottle counters on the tick
1383 */
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001384 if (interrupts == MAX_INTERRUPTS) {
1385 perf_log_throttle(counter, 1);
1386 counter->pmu->unthrottle(counter);
Peter Zijlstradf58ab22009-06-11 11:25:05 +02001387 interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001388 }
1389
Peter Zijlstra0d486962009-06-02 19:22:16 +02001390 if (!counter->attr.freq || !counter->attr.sample_freq)
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001391 continue;
1392
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001393 /*
1394 * if the specified freq < HZ then we need to skip ticks
1395 */
Peter Zijlstra6a24ed6c2009-06-05 18:01:29 +02001396 if (counter->attr.sample_freq < HZ) {
1397 freq = counter->attr.sample_freq;
1398
1399 hwc->freq_count += freq;
1400 hwc->freq_interrupts += interrupts;
1401
1402 if (hwc->freq_count < HZ)
1403 continue;
1404
1405 interrupts = hwc->freq_interrupts;
1406 hwc->freq_interrupts = 0;
1407 hwc->freq_count -= HZ;
1408 } else
1409 freq = HZ;
1410
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001411 perf_adjust_period(counter, freq * interrupts);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001412
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001413 /*
1414 * In order to avoid being stalled by an (accidental) huge
1415 * sample period, force reset the sample period if we didn't
1416 * get any events in this freq period.
1417 */
1418 if (!interrupts) {
1419 perf_disable();
1420 counter->pmu->disable(counter);
Paul Mackerras87847b82009-06-13 17:06:50 +10001421 atomic64_set(&hwc->period_left, 0);
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001422 counter->pmu->enable(counter);
1423 perf_enable();
1424 }
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001425 }
1426 spin_unlock(&ctx->lock);
1427}
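/*
 * Illustrative walk-through of the tick skipping above (assumed numbers):
 * with HZ = 1000 and attr.sample_freq = 100, freq_count grows by 100 on
 * every tick, so the adjustment only fires on every 10th tick. The
 * interrupts taken during those 10 ticks accumulate in freq_interrupts,
 * and freq * interrupts = 100 * <interrupts per 10 ticks> again
 * approximates an interrupt rate per second, which is the unit
 * perf_adjust_period() expects.
 */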
1428
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001429/*
1430 * Round-robin a context's counters:
1431 */
1432static void rotate_ctx(struct perf_counter_context *ctx)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001433{
Thomas Gleixner0793a612008-12-04 20:12:29 +01001434 struct perf_counter *counter;
1435
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001436 if (!ctx->nr_counters)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001437 return;
1438
Thomas Gleixner0793a612008-12-04 20:12:29 +01001439 spin_lock(&ctx->lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001440 /*
Ingo Molnar04289bb2008-12-11 08:38:42 +01001441 * Rotate the first entry last (works just fine for group counters too):
Thomas Gleixner0793a612008-12-04 20:12:29 +01001442 */
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001443 perf_disable();
Ingo Molnar04289bb2008-12-11 08:38:42 +01001444 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
Peter Zijlstra75564232009-03-13 12:21:29 +01001445 list_move_tail(&counter->list_entry, &ctx->counter_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001446 break;
1447 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001448 perf_enable();
Thomas Gleixner0793a612008-12-04 20:12:29 +01001449
1450 spin_unlock(&ctx->lock);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001451}
Thomas Gleixner0793a612008-12-04 20:12:29 +01001452
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001453void perf_counter_task_tick(struct task_struct *curr, int cpu)
1454{
Peter Zijlstra7fc23a52009-05-08 18:52:21 +02001455 struct perf_cpu_context *cpuctx;
1456 struct perf_counter_context *ctx;
1457
1458 if (!atomic_read(&nr_counters))
1459 return;
1460
1461 cpuctx = &per_cpu(perf_cpu_context, cpu);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001462 ctx = curr->perf_counter_ctxp;
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001463
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001464 perf_ctx_adjust_freq(&cpuctx->ctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001465 if (ctx)
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001466 perf_ctx_adjust_freq(ctx);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02001467
Ingo Molnarb82914c2009-05-04 18:54:32 +02001468 perf_counter_cpu_sched_out(cpuctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001469 if (ctx)
1470 __perf_counter_task_sched_out(ctx);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001471
Ingo Molnarb82914c2009-05-04 18:54:32 +02001472 rotate_ctx(&cpuctx->ctx);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001473 if (ctx)
1474 rotate_ctx(ctx);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001475
Ingo Molnarb82914c2009-05-04 18:54:32 +02001476 perf_counter_cpu_sched_in(cpuctx, cpu);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001477 if (ctx)
1478 perf_counter_task_sched_in(curr, cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001479}
1480
1481/*
Paul Mackerras57e79862009-06-30 16:07:19 +10001482 * Enable all of a task's counters that have been marked enable-on-exec.
1483 * This expects task == current.
1484 */
1485static void perf_counter_enable_on_exec(struct task_struct *task)
1486{
1487 struct perf_counter_context *ctx;
1488 struct perf_counter *counter;
1489 unsigned long flags;
1490 int enabled = 0;
1491
1492 local_irq_save(flags);
1493 ctx = task->perf_counter_ctxp;
1494 if (!ctx || !ctx->nr_counters)
1495 goto out;
1496
1497 __perf_counter_task_sched_out(ctx);
1498
1499 spin_lock(&ctx->lock);
1500
1501 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1502 if (!counter->attr.enable_on_exec)
1503 continue;
1504 counter->attr.enable_on_exec = 0;
1505 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
1506 continue;
Paul Mackerrasfa289be2009-08-25 15:17:20 +10001507 __perf_counter_mark_enabled(counter, ctx);
Paul Mackerras57e79862009-06-30 16:07:19 +10001508 enabled = 1;
1509 }
1510
1511 /*
1512 * Unclone this context if we enabled any counter.
1513 */
Peter Zijlstra71a851b2009-07-10 09:06:56 +02001514 if (enabled)
1515 unclone_ctx(ctx);
Paul Mackerras57e79862009-06-30 16:07:19 +10001516
1517 spin_unlock(&ctx->lock);
1518
1519 perf_counter_task_sched_in(task, smp_processor_id());
1520 out:
1521 local_irq_restore(flags);
1522}
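/*
 * Userspace sketch of how enable_on_exec is meant to be used
 * (illustrative only: the raw syscall is shown because libc has no
 * wrapper, and the attr setup is abbreviated):
 *
 *	struct perf_counter_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.enable_on_exec	= 1,
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 *	execvp(cmd, cmd_argv);	 counters are switched on right here
 *
 * so the measurement covers only the exec'ed workload and not the
 * fork()/exec() plumbing around it.
 */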
1523
1524/*
Thomas Gleixner0793a612008-12-04 20:12:29 +01001525 * Cross CPU call to read the hardware counter
1526 */
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001527static void __perf_counter_read(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001528{
Paul Mackerrase1ac3612009-08-14 15:39:10 +10001529 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001530 struct perf_counter *counter = info;
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001531 struct perf_counter_context *ctx = counter->ctx;
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001532 unsigned long flags;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001533
Paul Mackerrase1ac3612009-08-14 15:39:10 +10001534 /*
1535 * If this is a task context, we need to check whether it is
1536 * the current task context of this CPU. If not, it has been
1537 * scheduled out before the smp call arrived. In that case
1538 * counter->count would have been updated to a recent sample
1539 * when the counter was scheduled out.
1540 */
1541 if (ctx->task && cpuctx->task_ctx != ctx)
1542 return;
1543
Peter Zijlstra849691a2009-04-06 11:45:12 +02001544 local_irq_save(flags);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001545 if (ctx->is_active)
Peter Zijlstra4af49982009-04-06 11:45:10 +02001546 update_context_time(ctx);
Robert Richter4aeb0b42009-04-29 12:47:03 +02001547 counter->pmu->read(counter);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001548 update_counter_times(counter);
Peter Zijlstra849691a2009-04-06 11:45:12 +02001549 local_irq_restore(flags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001550}
1551
Ingo Molnar04289bb2008-12-11 08:38:42 +01001552static u64 perf_counter_read(struct perf_counter *counter)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001553{
1554 /*
1555 * If counter is enabled and currently active on a CPU, update the
1556 * value in the counter structure:
1557 */
Ingo Molnar6a930702008-12-11 15:17:03 +01001558 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
Thomas Gleixner0793a612008-12-04 20:12:29 +01001559 smp_call_function_single(counter->oncpu,
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02001560 __perf_counter_read, counter, 1);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11001561 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1562 update_counter_times(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001563 }
1564
Ingo Molnaree060942008-12-13 09:00:03 +01001565 return atomic64_read(&counter->count);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001566}
1567
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001568/*
1569 * Initialize the perf_counter context in a task_struct:
1570 */
1571static void
1572__perf_counter_init_context(struct perf_counter_context *ctx,
1573 struct task_struct *task)
1574{
1575 memset(ctx, 0, sizeof(*ctx));
1576 spin_lock_init(&ctx->lock);
1577 mutex_init(&ctx->mutex);
1578 INIT_LIST_HEAD(&ctx->counter_list);
1579 INIT_LIST_HEAD(&ctx->event_list);
1580 atomic_set(&ctx->refcount, 1);
1581 ctx->task = task;
1582}
1583
Thomas Gleixner0793a612008-12-04 20:12:29 +01001584static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1585{
Ingo Molnar22a4f652009-06-01 10:13:37 +02001586 struct perf_counter_context *ctx;
1587 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001588 struct task_struct *task;
Paul Mackerras25346b932009-06-01 17:48:12 +10001589 unsigned long flags;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001590 int err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001591
1592 /*
1593 * If cpu is not a wildcard then this is a percpu counter:
1594 */
1595 if (cpu != -1) {
1596 /* Must be root to operate on a CPU counter: */
Peter Zijlstra07647712009-06-11 11:18:36 +02001597 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
Thomas Gleixner0793a612008-12-04 20:12:29 +01001598 return ERR_PTR(-EACCES);
1599
1600	if (cpu < 0 || cpu >= num_possible_cpus())
1601 return ERR_PTR(-EINVAL);
1602
1603 /*
1604 * We could be clever and allow attaching a counter to an
1605 * offline CPU and activate it when the CPU comes up, but
1606 * that's for later.
1607 */
1608 if (!cpu_isset(cpu, cpu_online_map))
1609 return ERR_PTR(-ENODEV);
1610
1611 cpuctx = &per_cpu(perf_cpu_context, cpu);
1612 ctx = &cpuctx->ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001613 get_ctx(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001614
Thomas Gleixner0793a612008-12-04 20:12:29 +01001615 return ctx;
1616 }
1617
1618 rcu_read_lock();
1619 if (!pid)
1620 task = current;
1621 else
1622 task = find_task_by_vpid(pid);
1623 if (task)
1624 get_task_struct(task);
1625 rcu_read_unlock();
1626
1627 if (!task)
1628 return ERR_PTR(-ESRCH);
1629
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001630 /*
1631 * Can't attach counters to a dying task.
1632 */
1633 err = -ESRCH;
1634 if (task->flags & PF_EXITING)
1635 goto errout;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001636
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001637 /* Reuse ptrace permission checks for now. */
1638 err = -EACCES;
1639 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1640 goto errout;
1641
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001642 retry:
Paul Mackerras25346b932009-06-01 17:48:12 +10001643 ctx = perf_lock_task_context(task, &flags);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001644 if (ctx) {
Peter Zijlstra71a851b2009-07-10 09:06:56 +02001645 unclone_ctx(ctx);
Paul Mackerras25346b932009-06-01 17:48:12 +10001646 spin_unlock_irqrestore(&ctx->lock, flags);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001647 }
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001648
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001649 if (!ctx) {
1650 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001651 err = -ENOMEM;
1652 if (!ctx)
1653 goto errout;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001654 __perf_counter_init_context(ctx, task);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001655 get_ctx(ctx);
1656 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001657 /*
1658 * We raced with some other task; use
1659 * the context they set.
1660 */
1661 kfree(ctx);
Paul Mackerras25346b932009-06-01 17:48:12 +10001662 goto retry;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001663 }
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001664 get_task_struct(task);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10001665 }
1666
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001667 put_task_struct(task);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001668 return ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001669
1670 errout:
1671 put_task_struct(task);
1672 return ERR_PTR(err);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001673}
1674
Peter Zijlstra592903c2009-03-13 12:21:36 +01001675static void free_counter_rcu(struct rcu_head *head)
1676{
1677 struct perf_counter *counter;
1678
1679 counter = container_of(head, struct perf_counter, rcu_head);
Peter Zijlstra709e50c2009-06-02 14:13:15 +02001680 if (counter->ns)
1681 put_pid_ns(counter->ns);
Peter Zijlstra592903c2009-03-13 12:21:36 +01001682 kfree(counter);
1683}
1684
Peter Zijlstra925d5192009-03-30 19:07:02 +02001685static void perf_pending_sync(struct perf_counter *counter);
1686
Peter Zijlstraf1600952009-03-19 20:26:16 +01001687static void free_counter(struct perf_counter *counter)
1688{
Peter Zijlstra925d5192009-03-30 19:07:02 +02001689 perf_pending_sync(counter);
1690
Peter Zijlstraf3440112009-06-22 13:58:35 +02001691 if (!counter->parent) {
1692 atomic_dec(&nr_counters);
1693 if (counter->attr.mmap)
1694 atomic_dec(&nr_mmap_counters);
1695 if (counter->attr.comm)
1696 atomic_dec(&nr_comm_counters);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02001697 if (counter->attr.task)
1698 atomic_dec(&nr_task_counters);
Peter Zijlstraf3440112009-06-22 13:58:35 +02001699 }
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02001700
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02001701 if (counter->output) {
1702 fput(counter->output->filp);
1703 counter->output = NULL;
1704 }
1705
Peter Zijlstrae077df42009-03-19 20:26:17 +01001706 if (counter->destroy)
1707 counter->destroy(counter);
1708
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001709 put_ctx(counter->ctx);
Peter Zijlstraf1600952009-03-19 20:26:16 +01001710 call_rcu(&counter->rcu_head, free_counter_rcu);
1711}
1712
Thomas Gleixner0793a612008-12-04 20:12:29 +01001713/*
1714 * Called when the last reference to the file is gone.
1715 */
1716static int perf_release(struct inode *inode, struct file *file)
1717{
1718 struct perf_counter *counter = file->private_data;
1719 struct perf_counter_context *ctx = counter->ctx;
1720
1721 file->private_data = NULL;
1722
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001723 WARN_ON_ONCE(ctx->parent_ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001724 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001725 perf_counter_remove_from_context(counter);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001726 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001727
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02001728 mutex_lock(&counter->owner->perf_counter_mutex);
1729 list_del_init(&counter->owner_entry);
1730 mutex_unlock(&counter->owner->perf_counter_mutex);
1731 put_task_struct(counter->owner);
1732
Peter Zijlstraf1600952009-03-19 20:26:16 +01001733 free_counter(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001734
1735 return 0;
1736}
1737
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001738static int perf_counter_read_size(struct perf_counter *counter)
1739{
1740 int entry = sizeof(u64); /* value */
1741 int size = 0;
1742 int nr = 1;
1743
1744 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1745 size += sizeof(u64);
1746
1747 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1748 size += sizeof(u64);
1749
1750 if (counter->attr.read_format & PERF_FORMAT_ID)
1751 entry += sizeof(u64);
1752
1753 if (counter->attr.read_format & PERF_FORMAT_GROUP) {
1754 nr += counter->group_leader->nr_siblings;
1755 size += sizeof(u64);
1756 }
1757
1758 size += entry * nr;
1759
1760 return size;
1761}
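/*
 * Example of the sizing above (illustrative): for a group leader with
 * two siblings and read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP:
 *
 *	entry = 8 + 8		= 16	(value + id)
 *	size  = 8 + 8		= 16	(time_enabled + nr)
 *	nr    = 1 + 2		= 3
 *	size += entry * nr	= 16 + 48 = 64 bytes
 *
 * which matches the layout emitted by perf_counter_read_group() below.
 */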
1762
1763static u64 perf_counter_read_value(struct perf_counter *counter)
Peter Zijlstrae53c0992009-07-24 14:42:10 +02001764{
1765 struct perf_counter *child;
1766 u64 total = 0;
1767
1768 total += perf_counter_read(counter);
1769 list_for_each_entry(child, &counter->child_list, child_list)
1770 total += perf_counter_read(child);
1771
1772 return total;
1773}
1774
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001775static int perf_counter_read_entry(struct perf_counter *counter,
1776 u64 read_format, char __user *buf)
1777{
1778 int n = 0, count = 0;
1779 u64 values[2];
1780
1781 values[n++] = perf_counter_read_value(counter);
1782 if (read_format & PERF_FORMAT_ID)
1783 values[n++] = primary_counter_id(counter);
1784
1785 count = n * sizeof(u64);
1786
1787 if (copy_to_user(buf, values, count))
1788 return -EFAULT;
1789
1790 return count;
1791}
1792
1793static int perf_counter_read_group(struct perf_counter *counter,
1794 u64 read_format, char __user *buf)
1795{
1796 struct perf_counter *leader = counter->group_leader, *sub;
1797 int n = 0, size = 0, err = -EFAULT;
1798 u64 values[3];
1799
1800 values[n++] = 1 + leader->nr_siblings;
1801 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1802 values[n++] = leader->total_time_enabled +
1803 atomic64_read(&leader->child_total_time_enabled);
1804 }
1805 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1806 values[n++] = leader->total_time_running +
1807 atomic64_read(&leader->child_total_time_running);
1808 }
1809
1810 size = n * sizeof(u64);
1811
1812 if (copy_to_user(buf, values, size))
1813 return -EFAULT;
1814
1815 err = perf_counter_read_entry(leader, read_format, buf + size);
1816 if (err < 0)
1817 return err;
1818
1819 size += err;
1820
1821 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
Peter Zijlstra4464fca2009-08-21 17:19:36 +02001822 err = perf_counter_read_entry(sub, read_format,
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001823 buf + size);
1824 if (err < 0)
1825 return err;
1826
1827 size += err;
1828 }
1829
1830 return size;
1831}
1832
1833static int perf_counter_read_one(struct perf_counter *counter,
1834 u64 read_format, char __user *buf)
1835{
1836 u64 values[4];
1837 int n = 0;
1838
1839 values[n++] = perf_counter_read_value(counter);
1840 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1841 values[n++] = counter->total_time_enabled +
1842 atomic64_read(&counter->child_total_time_enabled);
1843 }
1844 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1845 values[n++] = counter->total_time_running +
1846 atomic64_read(&counter->child_total_time_running);
1847 }
1848 if (read_format & PERF_FORMAT_ID)
1849 values[n++] = primary_counter_id(counter);
1850
1851 if (copy_to_user(buf, values, n * sizeof(u64)))
1852 return -EFAULT;
1853
1854 return n * sizeof(u64);
1855}
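/*
 * The resulting read() layouts, for illustration. A non-group counter
 * with read_format = TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID yields:
 *
 *	u64 value, time_enabled, time_running, id;
 *
 * while PERF_FORMAT_GROUP with the same time flags yields:
 *
 *	u64 nr, time_enabled, time_running;
 *	followed by nr pairs of { u64 value; u64 id; }
 *
 * A hypothetical userspace decoder for the first case (sketch):
 *
 *	u64 buf[4];
 *	if (read(fd, buf, sizeof(buf)) == sizeof(buf))
 *		printf("count=%llu id=%llu\n", buf[0], buf[3]);
 */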
1856
Thomas Gleixner0793a612008-12-04 20:12:29 +01001857/*
1858 * Read the performance counter - simple non-blocking version for now
1859 */
1860static ssize_t
1861perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1862{
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001863 u64 read_format = counter->attr.read_format;
1864 int ret;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001865
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11001866 /*
1867 * Return end-of-file for a read on a counter that is in
1868 * error state (i.e. because it was pinned but it couldn't be
1869 * scheduled on to the CPU at some point).
1870 */
1871 if (counter->state == PERF_COUNTER_STATE_ERROR)
1872 return 0;
1873
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001874 if (count < perf_counter_read_size(counter))
1875 return -ENOSPC;
1876
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001877 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001878 mutex_lock(&counter->child_mutex);
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001879 if (read_format & PERF_FORMAT_GROUP)
1880 ret = perf_counter_read_group(counter, read_format, buf);
1881 else
1882 ret = perf_counter_read_one(counter, read_format, buf);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001883 mutex_unlock(&counter->child_mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001884
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02001885 return ret;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001886}
1887
1888static ssize_t
Thomas Gleixner0793a612008-12-04 20:12:29 +01001889perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1890{
1891 struct perf_counter *counter = file->private_data;
1892
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001893 return perf_read_hw(counter, buf, count);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001894}
1895
1896static unsigned int perf_poll(struct file *file, poll_table *wait)
1897{
1898 struct perf_counter *counter = file->private_data;
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001899 struct perf_mmap_data *data;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001900	unsigned int events = POLLHUP;
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001901
1902 rcu_read_lock();
1903 data = rcu_dereference(counter->data);
1904 if (data)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001905 events = atomic_xchg(&data->poll, 0);
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001906 rcu_read_unlock();
Thomas Gleixner0793a612008-12-04 20:12:29 +01001907
1908 poll_wait(file, &counter->waitq, wait);
1909
Thomas Gleixner0793a612008-12-04 20:12:29 +01001910 return events;
1911}
1912
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001913static void perf_counter_reset(struct perf_counter *counter)
1914{
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001915 (void)perf_counter_read(counter);
Paul Mackerras615a3f12009-05-11 15:50:21 +10001916 atomic64_set(&counter->count, 0);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001917 perf_counter_update_userpage(counter);
1918}
1919
Paul Mackerrasc93f7662009-05-28 22:18:17 +10001920/*
1921 * Holding the top-level counter's child_mutex means that any
1922 * descendant process that has inherited this counter will block
1923 * in sync_child_counter if it goes to exit, thus satisfying the
1924 * task existence requirements of perf_counter_enable/disable.
1925 */
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001926static void perf_counter_for_each_child(struct perf_counter *counter,
1927 void (*func)(struct perf_counter *))
1928{
1929 struct perf_counter *child;
1930
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10001931 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001932 mutex_lock(&counter->child_mutex);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001933 func(counter);
1934 list_for_each_entry(child, &counter->child_list, child_list)
1935 func(child);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02001936 mutex_unlock(&counter->child_mutex);
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001937}
1938
1939static void perf_counter_for_each(struct perf_counter *counter,
1940 void (*func)(struct perf_counter *))
1941{
Peter Zijlstra75f937f2009-06-15 15:05:12 +02001942 struct perf_counter_context *ctx = counter->ctx;
1943 struct perf_counter *sibling;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001944
Peter Zijlstra75f937f2009-06-15 15:05:12 +02001945 WARN_ON_ONCE(ctx->parent_ctx);
1946 mutex_lock(&ctx->mutex);
1947 counter = counter->group_leader;
1948
1949 perf_counter_for_each_child(counter, func);
1950 func(counter);
1951 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1952 perf_counter_for_each_child(counter, func);
1953 mutex_unlock(&ctx->mutex);
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001954}
1955
Peter Zijlstra08247e32009-06-02 16:46:57 +02001956static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1957{
1958 struct perf_counter_context *ctx = counter->ctx;
1959 unsigned long size;
1960 int ret = 0;
1961 u64 value;
1962
Peter Zijlstra0d486962009-06-02 19:22:16 +02001963 if (!counter->attr.sample_period)
Peter Zijlstra08247e32009-06-02 16:46:57 +02001964 return -EINVAL;
1965
1966	size = copy_from_user(&value, arg, sizeof(value));
1967	if (size)	/* copy_from_user() returns the number of bytes NOT copied */
1968		return -EFAULT;
1969
1970 if (!value)
1971 return -EINVAL;
1972
1973 spin_lock_irq(&ctx->lock);
Peter Zijlstra0d486962009-06-02 19:22:16 +02001974 if (counter->attr.freq) {
Peter Zijlstradf58ab22009-06-11 11:25:05 +02001975 if (value > sysctl_perf_counter_sample_rate) {
Peter Zijlstra08247e32009-06-02 16:46:57 +02001976 ret = -EINVAL;
1977 goto unlock;
1978 }
1979
Peter Zijlstra0d486962009-06-02 19:22:16 +02001980 counter->attr.sample_freq = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001981 } else {
Peter Zijlstra0d486962009-06-02 19:22:16 +02001982 counter->attr.sample_period = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001983 counter->hw.sample_period = value;
Peter Zijlstra08247e32009-06-02 16:46:57 +02001984 }
1985unlock:
1986 spin_unlock_irq(&ctx->lock);
1987
1988 return ret;
1989}
1990
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02001991int perf_counter_set_output(struct perf_counter *counter, int output_fd);
1992
Paul Mackerrasd859e292009-01-17 18:10:22 +11001993static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1994{
1995 struct perf_counter *counter = file->private_data;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001996 void (*func)(struct perf_counter *);
1997 u32 flags = arg;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001998
1999 switch (cmd) {
2000 case PERF_COUNTER_IOC_ENABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002001 func = perf_counter_enable;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002002 break;
2003 case PERF_COUNTER_IOC_DISABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002004 func = perf_counter_disable;
Peter Zijlstra79f14642009-04-06 11:45:07 +02002005 break;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02002006 case PERF_COUNTER_IOC_RESET:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002007 func = perf_counter_reset;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02002008 break;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002009
2010 case PERF_COUNTER_IOC_REFRESH:
2011 return perf_counter_refresh(counter, arg);
Peter Zijlstra08247e32009-06-02 16:46:57 +02002012
2013 case PERF_COUNTER_IOC_PERIOD:
2014 return perf_counter_period(counter, (u64 __user *)arg);
2015
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002016 case PERF_COUNTER_IOC_SET_OUTPUT:
2017 return perf_counter_set_output(counter, arg);
2018
Paul Mackerrasd859e292009-01-17 18:10:22 +11002019 default:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002020 return -ENOTTY;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002021 }
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02002022
2023 if (flags & PERF_IOC_FLAG_GROUP)
2024 perf_counter_for_each(counter, func);
2025 else
2026 perf_counter_for_each_child(counter, func);
2027
2028 return 0;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002029}
2030
Peter Zijlstra771d7cd2009-05-25 14:45:26 +02002031int perf_counter_task_enable(void)
2032{
2033 struct perf_counter *counter;
2034
2035 mutex_lock(&current->perf_counter_mutex);
2036 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
2037 perf_counter_for_each_child(counter, perf_counter_enable);
2038 mutex_unlock(&current->perf_counter_mutex);
2039
2040 return 0;
2041}
2042
2043int perf_counter_task_disable(void)
2044{
2045 struct perf_counter *counter;
2046
2047 mutex_lock(&current->perf_counter_mutex);
2048 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
2049 perf_counter_for_each_child(counter, perf_counter_disable);
2050 mutex_unlock(&current->perf_counter_mutex);
2051
2052 return 0;
2053}
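/*
 * These two are the backend for the prctl() interface; a sketch of the
 * userspace side (PR_TASK_PERF_COUNTERS_ENABLE/DISABLE are assumed to be
 * the prctl commands of this kernel vintage):
 *
 *	prctl(PR_TASK_PERF_COUNTERS_DISABLE);
 *	... leave the measured region ...
 *	prctl(PR_TASK_PERF_COUNTERS_ENABLE);
 *
 * which toggles every counter attached to the calling task at once.
 */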
2054
Ingo Molnarf738eb12009-08-18 11:32:24 +02002055#ifndef PERF_COUNTER_INDEX_OFFSET
2056# define PERF_COUNTER_INDEX_OFFSET 0
2057#endif
2058
Peter Zijlstra194002b2009-06-22 16:35:24 +02002059static int perf_counter_index(struct perf_counter *counter)
2060{
2061 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2062 return 0;
2063
2064 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
2065}
2066
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002067/*
2068 * Callers need to ensure there can be no nesting of this function, otherwise
2069 * the seqlock logic goes bad. We cannot serialize this because the arch
2070 * code calls this from NMI context.
2071 */
2072void perf_counter_update_userpage(struct perf_counter *counter)
Paul Mackerras37d81822009-03-23 18:22:08 +01002073{
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002074 struct perf_counter_mmap_page *userpg;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002075 struct perf_mmap_data *data;
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002076
2077 rcu_read_lock();
2078 data = rcu_dereference(counter->data);
2079 if (!data)
2080 goto unlock;
2081
2082 userpg = data->user_page;
Paul Mackerras37d81822009-03-23 18:22:08 +01002083
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002084 /*
2085	 * Disable preemption so as not to let the corresponding user-space
2086 * spin too long if we get preempted.
2087 */
2088 preempt_disable();
Paul Mackerras37d81822009-03-23 18:22:08 +01002089 ++userpg->lock;
Peter Zijlstra92f22a32009-04-02 11:12:04 +02002090 barrier();
Peter Zijlstra194002b2009-06-22 16:35:24 +02002091 userpg->index = perf_counter_index(counter);
Paul Mackerras37d81822009-03-23 18:22:08 +01002092 userpg->offset = atomic64_read(&counter->count);
2093 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
2094 userpg->offset -= atomic64_read(&counter->hw.prev_count);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002095
Peter Zijlstra7f8b4e42009-06-22 14:34:35 +02002096 userpg->time_enabled = counter->total_time_enabled +
2097 atomic64_read(&counter->child_total_time_enabled);
2098
2099 userpg->time_running = counter->total_time_running +
2100 atomic64_read(&counter->child_total_time_running);
2101
Peter Zijlstra92f22a32009-04-02 11:12:04 +02002102 barrier();
Paul Mackerras37d81822009-03-23 18:22:08 +01002103 ++userpg->lock;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002104 preempt_enable();
Peter Zijlstra38ff6672009-03-30 19:07:03 +02002105unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002106 rcu_read_unlock();
Paul Mackerras37d81822009-03-23 18:22:08 +01002107}
2108
2109static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2110{
2111 struct perf_counter *counter = vma->vm_file->private_data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002112 struct perf_mmap_data *data;
2113 int ret = VM_FAULT_SIGBUS;
Paul Mackerras37d81822009-03-23 18:22:08 +01002114
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002115 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2116 if (vmf->pgoff == 0)
2117 ret = 0;
2118 return ret;
2119 }
2120
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002121 rcu_read_lock();
2122 data = rcu_dereference(counter->data);
2123 if (!data)
2124 goto unlock;
Paul Mackerras37d81822009-03-23 18:22:08 +01002125
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002126 if (vmf->pgoff == 0) {
2127 vmf->page = virt_to_page(data->user_page);
2128 } else {
2129 int nr = vmf->pgoff - 1;
2130
2131		if ((unsigned)nr >= data->nr_pages)
2132 goto unlock;
2133
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002134 if (vmf->flags & FAULT_FLAG_WRITE)
2135 goto unlock;
2136
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002137 vmf->page = virt_to_page(data->data_pages[nr]);
2138 }
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002139
Paul Mackerras37d81822009-03-23 18:22:08 +01002140 get_page(vmf->page);
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002141 vmf->page->mapping = vma->vm_file->f_mapping;
2142 vmf->page->index = vmf->pgoff;
2143
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002144 ret = 0;
2145unlock:
2146 rcu_read_unlock();
2147
2148 return ret;
2149}
2150
2151static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2152{
2153 struct perf_mmap_data *data;
2154 unsigned long size;
2155 int i;
2156
2157 WARN_ON(atomic_read(&counter->mmap_count));
2158
2159 size = sizeof(struct perf_mmap_data);
2160 size += nr_pages * sizeof(void *);
2161
2162 data = kzalloc(size, GFP_KERNEL);
2163 if (!data)
2164 goto fail;
2165
2166 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2167 if (!data->user_page)
2168 goto fail_user_page;
2169
2170 for (i = 0; i < nr_pages; i++) {
2171 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2172 if (!data->data_pages[i])
2173 goto fail_data_pages;
2174 }
2175
2176 data->nr_pages = nr_pages;
Peter Zijlstra22c15582009-05-05 17:50:25 +02002177 atomic_set(&data->lock, -1);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002178
Peter Zijlstra2667de82009-09-17 19:01:10 +02002179 if (counter->attr.watermark) {
2180 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2181 counter->attr.wakeup_watermark);
2182 }
2183 if (!data->watermark)
2184 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2185
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002186 rcu_assign_pointer(counter->data, data);
2187
Paul Mackerras37d81822009-03-23 18:22:08 +01002188 return 0;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002189
2190fail_data_pages:
2191 for (i--; i >= 0; i--)
2192 free_page((unsigned long)data->data_pages[i]);
2193
2194 free_page((unsigned long)data->user_page);
2195
2196fail_user_page:
2197 kfree(data);
2198
2199fail:
2200 return -ENOMEM;
2201}
2202
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002203static void perf_mmap_free_page(unsigned long addr)
2204{
Kevin Cernekee5bfd7562009-07-05 12:08:19 -07002205 struct page *page = virt_to_page((void *)addr);
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002206
2207 page->mapping = NULL;
2208 __free_page(page);
2209}
2210
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002211static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2212{
Ingo Molnar22a4f652009-06-01 10:13:37 +02002213 struct perf_mmap_data *data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002214 int i;
2215
Ingo Molnar22a4f652009-06-01 10:13:37 +02002216 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2217
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002218 perf_mmap_free_page((unsigned long)data->user_page);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002219 for (i = 0; i < data->nr_pages; i++)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002220 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2221
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002222 kfree(data);
2223}
2224
2225static void perf_mmap_data_free(struct perf_counter *counter)
2226{
2227 struct perf_mmap_data *data = counter->data;
2228
2229 WARN_ON(atomic_read(&counter->mmap_count));
2230
2231 rcu_assign_pointer(counter->data, NULL);
2232 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2233}
2234
2235static void perf_mmap_open(struct vm_area_struct *vma)
2236{
2237 struct perf_counter *counter = vma->vm_file->private_data;
2238
2239 atomic_inc(&counter->mmap_count);
2240}
2241
2242static void perf_mmap_close(struct vm_area_struct *vma)
2243{
2244 struct perf_counter *counter = vma->vm_file->private_data;
2245
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10002246 WARN_ON_ONCE(counter->ctx->parent_ctx);
Ingo Molnar22a4f652009-06-01 10:13:37 +02002247 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002248 struct user_struct *user = current_user();
2249
2250 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002251 vma->vm_mm->locked_vm -= counter->data->nr_locked;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002252 perf_mmap_data_free(counter);
2253 mutex_unlock(&counter->mmap_mutex);
2254 }
Paul Mackerras37d81822009-03-23 18:22:08 +01002255}
2256
2257static struct vm_operations_struct perf_mmap_vmops = {
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002258 .open = perf_mmap_open,
2259 .close = perf_mmap_close,
2260 .fault = perf_mmap_fault,
2261 .page_mkwrite = perf_mmap_fault,
Paul Mackerras37d81822009-03-23 18:22:08 +01002262};
2263
2264static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2265{
2266 struct perf_counter *counter = file->private_data;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002267 unsigned long user_locked, user_lock_limit;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002268 struct user_struct *user = current_user();
Ingo Molnar22a4f652009-06-01 10:13:37 +02002269 unsigned long locked, lock_limit;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002270 unsigned long vma_size;
2271 unsigned long nr_pages;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002272 long user_extra, extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002273 int ret = 0;
Paul Mackerras37d81822009-03-23 18:22:08 +01002274
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002275 if (!(vma->vm_flags & VM_SHARED))
Paul Mackerras37d81822009-03-23 18:22:08 +01002276 return -EINVAL;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002277
2278 vma_size = vma->vm_end - vma->vm_start;
2279 nr_pages = (vma_size / PAGE_SIZE) - 1;
2280
Peter Zijlstra7730d862009-03-25 12:48:31 +01002281 /*
2282 * If we have data pages ensure they're a power-of-two number, so we
2283 * can do bitmasks instead of modulo.
2284 */
2285 if (nr_pages != 0 && !is_power_of_2(nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01002286 return -EINVAL;
2287
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002288 if (vma_size != PAGE_SIZE * (1 + nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01002289 return -EINVAL;
2290
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002291 if (vma->vm_pgoff != 0)
2292 return -EINVAL;
Paul Mackerras37d81822009-03-23 18:22:08 +01002293
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10002294 WARN_ON_ONCE(counter->ctx->parent_ctx);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002295 mutex_lock(&counter->mmap_mutex);
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002296 if (counter->output) {
2297 ret = -EINVAL;
2298 goto unlock;
2299 }
2300
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002301 if (atomic_inc_not_zero(&counter->mmap_count)) {
2302 if (nr_pages != counter->data->nr_pages)
2303 ret = -EINVAL;
2304 goto unlock;
2305 }
2306
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002307 user_extra = nr_pages + 1;
2308 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
Ingo Molnara3862d32009-05-24 09:02:37 +02002309
2310 /*
2311 * Increase the limit linearly with more CPUs:
2312 */
2313 user_lock_limit *= num_online_cpus();
2314
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002315 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002316
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002317 extra = 0;
2318 if (user_locked > user_lock_limit)
2319 extra = user_locked - user_lock_limit;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002320
2321 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2322 lock_limit >>= PAGE_SHIFT;
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002323 locked = vma->vm_mm->locked_vm + extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002324
Ingo Molnar459ec282009-09-13 17:33:44 +02002325 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2326 !capable(CAP_IPC_LOCK)) {
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002327 ret = -EPERM;
2328 goto unlock;
2329 }
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002330
2331 WARN_ON(counter->data);
2332 ret = perf_mmap_data_alloc(counter, nr_pages);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002333 if (ret)
2334 goto unlock;
2335
2336 atomic_set(&counter->mmap_count, 1);
Peter Zijlstra789f90f2009-05-15 15:19:27 +02002337 atomic_long_add(user_extra, &user->locked_vm);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02002338 vma->vm_mm->locked_vm += extra;
2339 counter->data->nr_locked = extra;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002340 if (vma->vm_flags & VM_WRITE)
2341 counter->data->writable = 1;
2342
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02002343unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002344 mutex_unlock(&counter->mmap_mutex);
Paul Mackerras37d81822009-03-23 18:22:08 +01002345
Paul Mackerras37d81822009-03-23 18:22:08 +01002346 vma->vm_flags |= VM_RESERVED;
2347 vma->vm_ops = &perf_mmap_vmops;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002348
2349 return ret;
Paul Mackerras37d81822009-03-23 18:22:08 +01002350}
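/*
 * Userspace sketch of the mmap() contract enforced above: one metadata
 * page plus a power-of-two number of data pages (illustrative, with
 * page_size from sysconf(_SC_PAGESIZE)):
 *
 *	int n = 8;				 must be a power of two
 *	size_t len = (n + 1) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * Mapping with PROT_WRITE + MAP_SHARED sets data->writable, which turns
 * on the data_tail based flow control in perf_output_space() below.
 */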
2351
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002352static int perf_fasync(int fd, struct file *filp, int on)
2353{
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002354 struct inode *inode = filp->f_path.dentry->d_inode;
Ingo Molnar22a4f652009-06-01 10:13:37 +02002355 struct perf_counter *counter = filp->private_data;
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002356 int retval;
2357
2358 mutex_lock(&inode->i_mutex);
2359 retval = fasync_helper(fd, filp, on, &counter->fasync);
2360 mutex_unlock(&inode->i_mutex);
2361
2362 if (retval < 0)
2363 return retval;
2364
2365 return 0;
2366}
2367
Thomas Gleixner0793a612008-12-04 20:12:29 +01002368static const struct file_operations perf_fops = {
2369 .release = perf_release,
2370 .read = perf_read,
2371 .poll = perf_poll,
Paul Mackerrasd859e292009-01-17 18:10:22 +11002372 .unlocked_ioctl = perf_ioctl,
2373 .compat_ioctl = perf_ioctl,
Paul Mackerras37d81822009-03-23 18:22:08 +01002374 .mmap = perf_mmap,
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02002375 .fasync = perf_fasync,
Thomas Gleixner0793a612008-12-04 20:12:29 +01002376};
2377
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002378/*
Peter Zijlstra925d5192009-03-30 19:07:02 +02002379 * Perf counter wakeup
2380 *
2381 * If there's data, ensure we set the poll() state and publish everything
2382 * to user-space before waking everybody up.
2383 */
2384
2385void perf_counter_wakeup(struct perf_counter *counter)
2386{
Peter Zijlstra925d5192009-03-30 19:07:02 +02002387 wake_up_all(&counter->waitq);
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002388
2389 if (counter->pending_kill) {
2390 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2391 counter->pending_kill = 0;
2392 }
Peter Zijlstra925d5192009-03-30 19:07:02 +02002393}
2394
2395/*
2396 * Pending wakeups
2397 *
2398 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2399 *
2400 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2401 * singly linked list and use cmpxchg() to add entries locklessly.
2402 */
2403
Peter Zijlstra79f14642009-04-06 11:45:07 +02002404static void perf_pending_counter(struct perf_pending_entry *entry)
2405{
2406 struct perf_counter *counter = container_of(entry,
2407 struct perf_counter, pending);
2408
2409 if (counter->pending_disable) {
2410 counter->pending_disable = 0;
Peter Zijlstra970892a2009-08-13 11:47:54 +02002411 __perf_counter_disable(counter);
Peter Zijlstra79f14642009-04-06 11:45:07 +02002412 }
2413
2414 if (counter->pending_wakeup) {
2415 counter->pending_wakeup = 0;
2416 perf_counter_wakeup(counter);
2417 }
2418}
2419
Peter Zijlstra671dec52009-04-06 11:45:02 +02002420#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02002421
Peter Zijlstra671dec52009-04-06 11:45:02 +02002422static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
Peter Zijlstra925d5192009-03-30 19:07:02 +02002423 PENDING_TAIL,
2424};
2425
Peter Zijlstra671dec52009-04-06 11:45:02 +02002426static void perf_pending_queue(struct perf_pending_entry *entry,
2427 void (*func)(struct perf_pending_entry *))
Peter Zijlstra925d5192009-03-30 19:07:02 +02002428{
Peter Zijlstra671dec52009-04-06 11:45:02 +02002429 struct perf_pending_entry **head;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002430
Peter Zijlstra671dec52009-04-06 11:45:02 +02002431 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02002432 return;
2433
Peter Zijlstra671dec52009-04-06 11:45:02 +02002434 entry->func = func;
2435
2436 head = &get_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002437
2438 do {
Peter Zijlstra671dec52009-04-06 11:45:02 +02002439 entry->next = *head;
2440 } while (cmpxchg(head, entry->next, entry) != entry->next);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002441
2442 set_perf_counter_pending();
2443
Peter Zijlstra671dec52009-04-06 11:45:02 +02002444 put_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002445}
2446
2447static int __perf_pending_run(void)
2448{
Peter Zijlstra671dec52009-04-06 11:45:02 +02002449 struct perf_pending_entry *list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002450 int nr = 0;
2451
Peter Zijlstra671dec52009-04-06 11:45:02 +02002452 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002453 while (list != PENDING_TAIL) {
Peter Zijlstra671dec52009-04-06 11:45:02 +02002454 void (*func)(struct perf_pending_entry *);
2455 struct perf_pending_entry *entry = list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002456
2457 list = list->next;
2458
Peter Zijlstra671dec52009-04-06 11:45:02 +02002459 func = entry->func;
2460 entry->next = NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002461 /*
2462 * Ensure we observe the unqueue before we issue the wakeup,
2463 * so that we won't be waiting forever.
2464 * -- see perf_not_pending().
2465 */
2466 smp_wmb();
2467
Peter Zijlstra671dec52009-04-06 11:45:02 +02002468 func(entry);
Peter Zijlstra925d5192009-03-30 19:07:02 +02002469 nr++;
2470 }
2471
2472 return nr;
2473}
2474
2475static inline int perf_not_pending(struct perf_counter *counter)
2476{
2477 /*
2478 * If we flush on whatever cpu we run, there is a chance we don't
2479 * need to wait.
2480 */
2481 get_cpu();
2482 __perf_pending_run();
2483 put_cpu();
2484
2485 /*
2486 * Ensure we see the proper queue state before going to sleep
2487	 * so that we do not miss the wakeup. -- see __perf_pending_run()
2488 */
2489 smp_rmb();
Peter Zijlstra671dec52009-04-06 11:45:02 +02002490 return counter->pending.next == NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02002491}
2492
2493static void perf_pending_sync(struct perf_counter *counter)
2494{
2495 wait_event(counter->waitq, perf_not_pending(counter));
2496}
2497
2498void perf_counter_do_pending(void)
2499{
2500 __perf_pending_run();
2501}
2502
2503/*
Peter Zijlstra394ee072009-03-30 19:07:14 +02002504 * Callchain support -- arch specific
2505 */
2506
Peter Zijlstra9c03d882009-04-06 11:45:00 +02002507__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
Peter Zijlstra394ee072009-03-30 19:07:14 +02002508{
2509 return NULL;
2510}
2511
2512/*
Peter Zijlstra0322cd62009-03-19 20:26:19 +01002513 * Output
2514 */
Peter Zijlstra2667de82009-09-17 19:01:10 +02002515static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2516 unsigned long offset, unsigned long head)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002517{
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002518 unsigned long mask;
2519
2520 if (!data->writable)
2521 return true;
2522
2523 mask = (data->nr_pages << PAGE_SHIFT) - 1;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002524
2525 offset = (offset - tail) & mask;
2526 head = (head - tail) & mask;
2527
2528 if ((int)(head - offset) < 0)
2529 return false;
2530
2531 return true;
2532}
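/*
 * A worked example of the check above (assumed numbers): with 4 data
 * pages of 4096 bytes, mask = 0x3fff. Say tail = 0x5000, offset = 0x8f00
 * and the reservation would move head to 0x9100:
 *
 *	offset = (0x8f00 - 0x5000) & 0x3fff = 0x3f00
 *	head   = (0x9100 - 0x5000) & 0x3fff = 0x0100
 *
 * (int)(head - offset) < 0, i.e. head would wrap past the reader's tail
 * and overwrite unconsumed data, so no space is reported.
 */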
2533
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002534static void perf_output_wakeup(struct perf_output_handle *handle)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002535{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002536 atomic_set(&handle->data->poll, POLL_IN);
2537
Peter Zijlstra671dec52009-04-06 11:45:02 +02002538 if (handle->nmi) {
Peter Zijlstra79f14642009-04-06 11:45:07 +02002539 handle->counter->pending_wakeup = 1;
Peter Zijlstra671dec52009-04-06 11:45:02 +02002540 perf_pending_queue(&handle->counter->pending,
Peter Zijlstra79f14642009-04-06 11:45:07 +02002541 perf_pending_counter);
Peter Zijlstra671dec52009-04-06 11:45:02 +02002542 } else
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002543 perf_counter_wakeup(handle->counter);
2544}
2545
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002546/*
2547 * Curious locking construct.
2548 *
2549 * We need to ensure a later event doesn't publish a head when a former
2550 * event isn't done writing. However, since we need to deal with NMIs we
2551 * cannot fully serialize things.
2552 *
2553 * What we do is serialize between CPUs so we only have to deal with NMI
2554 * nesting on a single CPU.
2555 *
2556 * We only publish the head (and generate a wakeup) when the outer-most
2557 * event completes.
2558 */
2559static void perf_output_lock(struct perf_output_handle *handle)
2560{
2561 struct perf_mmap_data *data = handle->data;
2562 int cpu;
2563
2564 handle->locked = 0;
2565
2566 local_irq_save(handle->flags);
2567 cpu = smp_processor_id();
2568
2569 if (in_nmi() && atomic_read(&data->lock) == cpu)
2570 return;
2571
Peter Zijlstra22c15582009-05-05 17:50:25 +02002572 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002573 cpu_relax();
2574
2575 handle->locked = 1;
2576}
2577
2578static void perf_output_unlock(struct perf_output_handle *handle)
2579{
2580 struct perf_mmap_data *data = handle->data;
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002581 unsigned long head;
2582 int cpu;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002583
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002584 data->done_head = data->head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002585
2586 if (!handle->locked)
2587 goto out;
2588
2589again:
2590 /*
2591 * The xchg implies a full barrier that ensures all writes are done
2592 * before we publish the new head, matched by a rmb() in userspace when
2593 * reading this position.
2594 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002595 while ((head = atomic_long_xchg(&data->done_head, 0)))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002596 data->user_page->data_head = head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002597
2598 /*
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002599 * NMI can happen here, which means we can miss a done_head update.
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002600 */
2601
Peter Zijlstra22c15582009-05-05 17:50:25 +02002602 cpu = atomic_xchg(&data->lock, -1);
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002603 WARN_ON_ONCE(cpu != smp_processor_id());
2604
2605 /*
2606	 * Therefore we have to validate that we did not miss a done_head update.
2607 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002608 if (unlikely(atomic_long_read(&data->done_head))) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002609 /*
2610 * Since we had it locked, we can lock it again.
2611 */
Peter Zijlstra22c15582009-05-05 17:50:25 +02002612 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002613 cpu_relax();
2614
2615 goto again;
2616 }
2617
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002618 if (atomic_xchg(&data->wakeup, 0))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002619 perf_output_wakeup(handle);
2620out:
2621 local_irq_restore(handle->flags);
2622}
2623
Markus Metzger5622f292009-09-15 13:00:23 +02002624void perf_output_copy(struct perf_output_handle *handle,
2625 const void *buf, unsigned int len)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002626{
2627 unsigned int pages_mask;
2628 unsigned int offset;
2629 unsigned int size;
2630 void **pages;
2631
2632 offset = handle->offset;
2633 pages_mask = handle->data->nr_pages - 1;
2634 pages = handle->data->data_pages;
2635
2636 do {
2637 unsigned int page_offset;
2638 int nr;
2639
2640 nr = (offset >> PAGE_SHIFT) & pages_mask;
2641 page_offset = offset & (PAGE_SIZE - 1);
2642 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2643
2644 memcpy(pages[nr] + page_offset, buf, size);
2645
2646 len -= size;
2647 buf += size;
2648 offset += size;
2649 } while (len);
2650
2651 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01002652
Peter Zijlstra53020fe2009-05-13 21:26:19 +02002653 /*
2654 * Check we didn't copy past our reservation window, taking the
2655 * possible unsigned int wrap into account.
2656 */
Peter Zijlstra8e3747c2009-06-02 16:16:02 +02002657 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002658}
2659
Markus Metzger5622f292009-09-15 13:00:23 +02002660int perf_output_begin(struct perf_output_handle *handle,
2661 struct perf_counter *counter, unsigned int size,
2662 int nmi, int sample)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002663{
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002664 struct perf_counter *output_counter;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002665 struct perf_mmap_data *data;
Peter Zijlstra2667de82009-09-17 19:01:10 +02002666 unsigned long tail, offset, head;
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002667 int have_lost;
2668 struct {
2669 struct perf_event_header header;
2670 u64 id;
2671 u64 lost;
2672 } lost_event;
2673
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002674 rcu_read_lock();
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002675 /*
2676 * For inherited counters we send all the output towards the parent.
2677 */
2678 if (counter->parent)
2679 counter = counter->parent;
2680
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02002681 output_counter = rcu_dereference(counter->output);
2682 if (output_counter)
2683 counter = output_counter;
2684
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002685 data = rcu_dereference(counter->data);
2686 if (!data)
2687 goto out;
2688
2689 handle->data = data;
2690 handle->counter = counter;
2691 handle->nmi = nmi;
2692 handle->sample = sample;
2693
2694 if (!data->nr_pages)
2695 goto fail;
2696
2697 have_lost = atomic_read(&data->lost);
2698 if (have_lost)
2699 size += sizeof(lost_event);
2700
2701 perf_output_lock(handle);
2702
2703 do {
Peter Zijlstra2667de82009-09-17 19:01:10 +02002704 /*
2705		 * Userspace could choose to issue an mb() before updating the
2706		 * tail pointer, so that all reads are completed before the
2707		 * write is issued.
2708 */
2709 tail = ACCESS_ONCE(data->user_page->data_tail);
2710 smp_rmb();
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002711 offset = head = atomic_long_read(&data->head);
2712 head += size;
Peter Zijlstra2667de82009-09-17 19:01:10 +02002713 if (unlikely(!perf_output_space(data, tail, offset, head)))
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002714 goto fail;
2715 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2716
2717 handle->offset = offset;
2718 handle->head = head;
2719
Peter Zijlstra2667de82009-09-17 19:01:10 +02002720 if (head - tail > data->watermark)
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002721 atomic_set(&data->wakeup, 1);
2722
2723 if (have_lost) {
2724 lost_event.header.type = PERF_EVENT_LOST;
2725 lost_event.header.misc = 0;
2726 lost_event.header.size = sizeof(lost_event);
2727 lost_event.id = counter->id;
2728 lost_event.lost = atomic_xchg(&data->lost, 0);
2729
2730 perf_output_put(handle, lost_event);
2731 }
2732
2733 return 0;
2734
2735fail:
2736 atomic_inc(&data->lost);
2737 perf_output_unlock(handle);
2738out:
2739 rcu_read_unlock();
2740
2741 return -ENOSPC;
2742}
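/*
 * The usual pattern for emitting a record through this handle, as the
 * event-generating code later in this file does (sketch):
 *
 *	struct perf_output_handle handle;
 *	int ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 *	if (ret)
 *		return;		 no space: data->lost was bumped
 *	perf_output_put(&handle, header);
 *	... perf_output_copy() / perf_output_put() for the payload ...
 *	perf_output_end(&handle);
 *
 * perf_output_begin() reserves header.size bytes up front; the writes in
 * between must add up to exactly that reservation, which the WARN_ON_ONCE
 * in perf_output_copy() checks.
 */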
2743
Markus Metzger5622f292009-09-15 13:00:23 +02002744void perf_output_end(struct perf_output_handle *handle)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002745{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002746 struct perf_counter *counter = handle->counter;
2747 struct perf_mmap_data *data = handle->data;
2748
Peter Zijlstra0d486962009-06-02 19:22:16 +02002749 int wakeup_events = counter->attr.wakeup_events;
Peter Zijlstrac4578102009-04-02 11:12:01 +02002750
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01002751 if (handle->sample && wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002752 int events = atomic_inc_return(&data->events);
Peter Zijlstrac4578102009-04-02 11:12:01 +02002753 if (events >= wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002754 atomic_sub(wakeup_events, &data->events);
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02002755 atomic_set(&data->wakeup, 1);
Peter Zijlstrac4578102009-04-02 11:12:01 +02002756 }
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02002757 }
2758
2759 perf_output_unlock(handle);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01002760 rcu_read_unlock();
2761}
2762
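/*
 * A worked example of the wakeup throttling above, assuming an otherwise
 * default perf_counter_attr:
 *
 *	attr.wakeup_events = 64;
 *
 * Each sampled record increments data->events; once 64 records have been
 * written the counter arms data->wakeup, so a blocking poll() on the counter
 * fd returns roughly once per 64 samples instead of once per sample.  With
 * wakeup_events == 0 no wakeups are generated from this path at all.
 */
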
Peter Zijlstra709e50c2009-06-02 14:13:15 +02002763static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2764{
2765 /*
2766 * only top level counters have the pid namespace they were created in
2767 */
2768 if (counter->parent)
2769 counter = counter->parent;
2770
2771 return task_tgid_nr_ns(p, counter->ns);
2772}
2773
2774static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2775{
2776 /*
2777 * only top level counters have the pid namespace they were created in
2778 */
2779 if (counter->parent)
2780 counter = counter->parent;
2781
2782 return task_pid_nr_ns(p, counter->ns);
2783}
2784
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002785static void perf_output_read_one(struct perf_output_handle *handle,
2786 struct perf_counter *counter)
2787{
2788 u64 read_format = counter->attr.read_format;
2789 u64 values[4];
2790 int n = 0;
2791
2792 values[n++] = atomic64_read(&counter->count);
2793 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2794 values[n++] = counter->total_time_enabled +
2795 atomic64_read(&counter->child_total_time_enabled);
2796 }
2797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2798 values[n++] = counter->total_time_running +
2799 atomic64_read(&counter->child_total_time_running);
2800 }
2801 if (read_format & PERF_FORMAT_ID)
2802 values[n++] = primary_counter_id(counter);
2803
2804 perf_output_copy(handle, values, n * sizeof(u64));
2805}
2806
2807/*
2808 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
2809 */
2810static void perf_output_read_group(struct perf_output_handle *handle,
2811 struct perf_counter *counter)
2812{
2813 struct perf_counter *leader = counter->group_leader, *sub;
2814 u64 read_format = counter->attr.read_format;
2815 u64 values[5];
2816 int n = 0;
2817
2818 values[n++] = 1 + leader->nr_siblings;
2819
2820 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2821 values[n++] = leader->total_time_enabled;
2822
2823 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2824 values[n++] = leader->total_time_running;
2825
2826 if (leader != counter)
2827 leader->pmu->read(leader);
2828
2829 values[n++] = atomic64_read(&leader->count);
2830 if (read_format & PERF_FORMAT_ID)
2831 values[n++] = primary_counter_id(leader);
2832
2833 perf_output_copy(handle, values, n * sizeof(u64));
2834
2835 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2836 n = 0;
2837
2838 if (sub != counter)
2839 sub->pmu->read(sub);
2840
2841 values[n++] = atomic64_read(&sub->count);
2842 if (read_format & PERF_FORMAT_ID)
2843 values[n++] = primary_counter_id(sub);
2844
2845 perf_output_copy(handle, values, n * sizeof(u64));
2846 }
2847}
2848
2849static void perf_output_read(struct perf_output_handle *handle,
2850 struct perf_counter *counter)
2851{
2852 if (counter->attr.read_format & PERF_FORMAT_GROUP)
2853 perf_output_read_group(handle, counter);
2854 else
2855 perf_output_read_one(handle, counter);
2856}
2857
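/*
 * The two helpers above define the layout of a read value in the stream.
 * For a single counter (no PERF_FORMAT_GROUP) the u64s come out in this
 * order:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }		   if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }		   if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id; }			   if PERF_FORMAT_ID
 *	}
 *
 * and for PERF_FORMAT_GROUP the whole group (leader first) is dumped at once:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }		   if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }		   if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value; { u64 id; } }[nr];  id only if PERF_FORMAT_ID
 *	}
 *
 * which is what a userspace reader has to parse back out of the stream.
 */
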
Markus Metzger5622f292009-09-15 13:00:23 +02002858void perf_output_sample(struct perf_output_handle *handle,
2859 struct perf_event_header *header,
2860 struct perf_sample_data *data,
2861 struct perf_counter *counter)
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002862{
Markus Metzger5622f292009-09-15 13:00:23 +02002863 u64 sample_type = data->type;
2864
2865 perf_output_put(handle, *header);
2866
2867 if (sample_type & PERF_SAMPLE_IP)
2868 perf_output_put(handle, data->ip);
2869
2870 if (sample_type & PERF_SAMPLE_TID)
2871 perf_output_put(handle, data->tid_entry);
2872
2873 if (sample_type & PERF_SAMPLE_TIME)
2874 perf_output_put(handle, data->time);
2875
2876 if (sample_type & PERF_SAMPLE_ADDR)
2877 perf_output_put(handle, data->addr);
2878
2879 if (sample_type & PERF_SAMPLE_ID)
2880 perf_output_put(handle, data->id);
2881
2882 if (sample_type & PERF_SAMPLE_STREAM_ID)
2883 perf_output_put(handle, data->stream_id);
2884
2885 if (sample_type & PERF_SAMPLE_CPU)
2886 perf_output_put(handle, data->cpu_entry);
2887
2888 if (sample_type & PERF_SAMPLE_PERIOD)
2889 perf_output_put(handle, data->period);
2890
2891 if (sample_type & PERF_SAMPLE_READ)
2892 perf_output_read(handle, counter);
2893
2894 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2895 if (data->callchain) {
2896 int size = 1;
2897
2898 if (data->callchain)
2899 size += data->callchain->nr;
2900
2901 size *= sizeof(u64);
2902
2903 perf_output_copy(handle, data->callchain, size);
2904 } else {
2905 u64 nr = 0;
2906 perf_output_put(handle, nr);
2907 }
2908 }
2909
2910 if (sample_type & PERF_SAMPLE_RAW) {
2911 if (data->raw) {
2912 perf_output_put(handle, data->raw->size);
2913 perf_output_copy(handle, data->raw->data,
2914 data->raw->size);
2915 } else {
2916 struct {
2917 u32 size;
2918 u32 data;
2919 } raw = {
2920 .size = sizeof(u32),
2921 .data = 0,
2922 };
2923 perf_output_put(handle, raw);
2924 }
2925 }
2926}
2927
2928void perf_prepare_sample(struct perf_event_header *header,
2929 struct perf_sample_data *data,
2930 struct perf_counter *counter,
2931 struct pt_regs *regs)
2932{
Peter Zijlstra0d486962009-06-02 19:22:16 +02002933 u64 sample_type = counter->attr.sample_type;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002934
Markus Metzger5622f292009-09-15 13:00:23 +02002935 data->type = sample_type;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002936
Markus Metzger5622f292009-09-15 13:00:23 +02002937 header->type = PERF_EVENT_SAMPLE;
2938 header->size = sizeof(*header);
2939
2940 header->misc = 0;
2941 header->misc |= perf_misc_flags(regs);
Peter Zijlstra6fab0192009-04-08 15:01:26 +02002942
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002943 if (sample_type & PERF_SAMPLE_IP) {
Markus Metzger5622f292009-09-15 13:00:23 +02002944 data->ip = perf_instruction_pointer(regs);
2945
2946 header->size += sizeof(data->ip);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002947 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002948
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002949 if (sample_type & PERF_SAMPLE_TID) {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002950 /* namespace issues */
Markus Metzger5622f292009-09-15 13:00:23 +02002951 data->tid_entry.pid = perf_counter_pid(counter, current);
2952 data->tid_entry.tid = perf_counter_tid(counter, current);
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002953
Markus Metzger5622f292009-09-15 13:00:23 +02002954 header->size += sizeof(data->tid_entry);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002955 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002956
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002957 if (sample_type & PERF_SAMPLE_TIME) {
Peter Zijlstra4d855452009-04-08 15:01:32 +02002958 /*
2959 * Maybe do better on x86 and provide cpu_clock_nmi()
2960 */
Markus Metzger5622f292009-09-15 13:00:23 +02002961 data->time = sched_clock();
Peter Zijlstra4d855452009-04-08 15:01:32 +02002962
Markus Metzger5622f292009-09-15 13:00:23 +02002963 header->size += sizeof(data->time);
Peter Zijlstra4d855452009-04-08 15:01:32 +02002964 }
2965
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002966 if (sample_type & PERF_SAMPLE_ADDR)
Markus Metzger5622f292009-09-15 13:00:23 +02002967 header->size += sizeof(data->addr);
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002968
Markus Metzger5622f292009-09-15 13:00:23 +02002969 if (sample_type & PERF_SAMPLE_ID) {
2970 data->id = primary_counter_id(counter);
Peter Zijlstraa85f61a2009-05-08 18:52:23 +02002971
Markus Metzger5622f292009-09-15 13:00:23 +02002972 header->size += sizeof(data->id);
2973 }
2974
2975 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2976 data->stream_id = counter->id;
2977
2978 header->size += sizeof(data->stream_id);
2979 }
Peter Zijlstra7f453c22009-07-21 13:19:40 +02002980
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002981 if (sample_type & PERF_SAMPLE_CPU) {
Markus Metzger5622f292009-09-15 13:00:23 +02002982 data->cpu_entry.cpu = raw_smp_processor_id();
2983 data->cpu_entry.reserved = 0;
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002984
Markus Metzger5622f292009-09-15 13:00:23 +02002985 header->size += sizeof(data->cpu_entry);
Peter Zijlstraf370e1e2009-05-08 18:52:24 +02002986 }
2987
Peter Zijlstrae6e18ec2009-06-25 11:27:12 +02002988 if (sample_type & PERF_SAMPLE_PERIOD)
Markus Metzger5622f292009-09-15 13:00:23 +02002989 header->size += sizeof(data->period);
Peter Zijlstra689802b2009-06-05 15:05:43 +02002990
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02002991 if (sample_type & PERF_SAMPLE_READ)
Markus Metzger5622f292009-09-15 13:00:23 +02002992 header->size += perf_counter_read_size(counter);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002993
Peter Zijlstrab23f3322009-06-02 15:13:03 +02002994 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
Markus Metzger5622f292009-09-15 13:00:23 +02002995 int size = 1;
Peter Zijlstra394ee072009-03-30 19:07:14 +02002996
Markus Metzger5622f292009-09-15 13:00:23 +02002997 data->callchain = perf_callchain(regs);
2998
2999 if (data->callchain)
3000 size += data->callchain->nr;
3001
3002 header->size += size * sizeof(u64);
Peter Zijlstra394ee072009-03-30 19:07:14 +02003003 }
3004
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003005 if (sample_type & PERF_SAMPLE_RAW) {
Peter Zijlstraa0445602009-08-10 11:16:52 +02003006 int size = sizeof(u32);
3007
3008 if (data->raw)
3009 size += data->raw->size;
3010 else
3011 size += sizeof(u32);
3012
3013 WARN_ON_ONCE(size & (sizeof(u64)-1));
Markus Metzger5622f292009-09-15 13:00:23 +02003014 header->size += size;
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003015 }
Markus Metzger5622f292009-09-15 13:00:23 +02003016}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003017
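/*
 * A worked size example for the above, assuming
 * sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD:
 *
 *	header->size  = sizeof(*header)			    8 bytes
 *		      + sizeof(data->ip)		   +8
 *		      + sizeof(data->tid_entry)		   +8 (two u32s)
 *		      + sizeof(data->period);		   +8  => 32 bytes
 *
 * perf_output_begin() then reserves exactly those 32 bytes, and
 * perf_output_sample() fills them in the same order the size was built up,
 * so the two functions have to stay in sync.
 */
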
Markus Metzger5622f292009-09-15 13:00:23 +02003018static void perf_counter_output(struct perf_counter *counter, int nmi,
3019 struct perf_sample_data *data,
3020 struct pt_regs *regs)
3021{
3022 struct perf_output_handle handle;
3023 struct perf_event_header header;
3024
3025 perf_prepare_sample(&header, data, counter, regs);
3026
3027 if (perf_output_begin(&handle, counter, header.size, nmi, 1))
Peter Zijlstra5ed00412009-03-30 19:07:12 +02003028 return;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01003029
Markus Metzger5622f292009-09-15 13:00:23 +02003030 perf_output_sample(&handle, &header, data, counter);
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003031
Peter Zijlstra5ed00412009-03-30 19:07:12 +02003032 perf_output_end(&handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01003033}
3034
Peter Zijlstra0322cd62009-03-19 20:26:19 +01003035/*
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003036 * read event
3037 */
3038
3039struct perf_read_event {
3040 struct perf_event_header header;
3041
3042 u32 pid;
3043 u32 tid;
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003044};
3045
3046static void
3047perf_counter_read_event(struct perf_counter *counter,
3048 struct task_struct *task)
3049{
3050 struct perf_output_handle handle;
3051 struct perf_read_event event = {
3052 .header = {
3053 .type = PERF_EVENT_READ,
3054 .misc = 0,
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003055 .size = sizeof(event) + perf_counter_read_size(counter),
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003056 },
3057 .pid = perf_counter_pid(counter, task),
3058 .tid = perf_counter_tid(counter, task),
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003059 };
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003060 int ret;
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003061
3062 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
3063 if (ret)
3064 return;
3065
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02003066 perf_output_put(&handle, event);
3067 perf_output_read(&handle, counter);
3068
Peter Zijlstra38b200d2009-06-23 20:13:11 +02003069 perf_output_end(&handle);
3070}
3071
3072/*
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003073 * task tracking -- fork/exit
3074 *
3075 * enabled by: attr.comm | attr.mmap | attr.task
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003076 */
3077
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003078struct perf_task_event {
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003079 struct task_struct *task;
3080 struct perf_counter_context *task_ctx;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003081
3082 struct {
3083 struct perf_event_header header;
3084
3085 u32 pid;
3086 u32 ppid;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003087 u32 tid;
3088 u32 ptid;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003089 } event;
3090};
3091
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003092static void perf_counter_task_output(struct perf_counter *counter,
3093 struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003094{
3095 struct perf_output_handle handle;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003096 int size = task_event->event.header.size;
3097 struct task_struct *task = task_event->task;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003098 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3099
3100 if (ret)
3101 return;
3102
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003103 task_event->event.pid = perf_counter_pid(counter, task);
Peter Zijlstra94d5d1b2009-08-13 16:14:42 +02003104 task_event->event.ppid = perf_counter_pid(counter, current);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003105
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003106 task_event->event.tid = perf_counter_tid(counter, task);
Peter Zijlstra94d5d1b2009-08-13 16:14:42 +02003107 task_event->event.ptid = perf_counter_tid(counter, current);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003108
3109 perf_output_put(&handle, task_event->event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003110 perf_output_end(&handle);
3111}
3112
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003113static int perf_counter_task_match(struct perf_counter *counter)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003114{
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003115 if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003116 return 1;
3117
3118 return 0;
3119}
3120
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003121static void perf_counter_task_ctx(struct perf_counter_context *ctx,
3122 struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003123{
3124 struct perf_counter *counter;
3125
3126 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3127 return;
3128
3129 rcu_read_lock();
3130 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003131 if (perf_counter_task_match(counter))
3132 perf_counter_task_output(counter, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003133 }
3134 rcu_read_unlock();
3135}
3136
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003137static void perf_counter_task_event(struct perf_task_event *task_event)
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003138{
3139 struct perf_cpu_context *cpuctx;
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003140 struct perf_counter_context *ctx = task_event->task_ctx;
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003141
3142 cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003143 perf_counter_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003144 put_cpu_var(perf_cpu_context);
3145
3146 rcu_read_lock();
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003147 if (!ctx)
3148 ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003149 if (ctx)
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003150 perf_counter_task_ctx(ctx, task_event);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003151 rcu_read_unlock();
3152}
3153
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003154static void perf_counter_task(struct task_struct *task,
3155 struct perf_counter_context *task_ctx,
3156 int new)
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003157{
3158 struct perf_task_event task_event;
3159
3160 if (!atomic_read(&nr_comm_counters) &&
3161 !atomic_read(&nr_mmap_counters) &&
3162 !atomic_read(&nr_task_counters))
3163 return;
3164
3165 task_event = (struct perf_task_event){
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003166 .task = task,
3167 .task_ctx = task_ctx,
3168 .event = {
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02003169 .header = {
3170 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
3171 .misc = 0,
3172 .size = sizeof(task_event.event),
3173 },
3174 /* .pid */
3175 /* .ppid */
3176 /* .tid */
3177 /* .ptid */
3178 },
3179 };
3180
3181 perf_counter_task_event(&task_event);
3182}
3183
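/*
 * The record emitted above is fixed size: a perf_event_header of type
 * PERF_EVENT_FORK (new) or PERF_EVENT_EXIT plus four u32 ids, with
 * pid/ppid/tid/ptid translated into the pid namespace the counter was
 * created in:
 *
 *	struct {
 *		struct perf_event_header header;	    8 bytes
 *		u32 pid, ppid;				   +8
 *		u32 tid, ptid;				   +8  => 24 bytes
 *	};
 */
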
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003184void perf_counter_fork(struct task_struct *task)
3185{
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02003186 perf_counter_task(task, NULL, 1);
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003187}
3188
3189/*
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003190 * comm tracking
3191 */
3192
3193struct perf_comm_event {
Ingo Molnar22a4f652009-06-01 10:13:37 +02003194 struct task_struct *task;
3195 char *comm;
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003196 int comm_size;
3197
3198 struct {
3199 struct perf_event_header header;
3200
3201 u32 pid;
3202 u32 tid;
3203 } event;
3204};
3205
3206static void perf_counter_comm_output(struct perf_counter *counter,
3207 struct perf_comm_event *comm_event)
3208{
3209 struct perf_output_handle handle;
3210 int size = comm_event->event.header.size;
3211 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3212
3213 if (ret)
3214 return;
3215
Peter Zijlstra709e50c2009-06-02 14:13:15 +02003216 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
3217 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
3218
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003219 perf_output_put(&handle, comm_event->event);
3220 perf_output_copy(&handle, comm_event->comm,
3221 comm_event->comm_size);
3222 perf_output_end(&handle);
3223}
3224
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003225static int perf_counter_comm_match(struct perf_counter *counter)
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003226{
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003227 if (counter->attr.comm)
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003228 return 1;
3229
3230 return 0;
3231}
3232
3233static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
3234 struct perf_comm_event *comm_event)
3235{
3236 struct perf_counter *counter;
3237
3238 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3239 return;
3240
3241 rcu_read_lock();
3242 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003243 if (perf_counter_comm_match(counter))
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003244 perf_counter_comm_output(counter, comm_event);
3245 }
3246 rcu_read_unlock();
3247}
3248
3249static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3250{
3251 struct perf_cpu_context *cpuctx;
Peter Zijlstra665c2142009-05-29 14:51:57 +02003252 struct perf_counter_context *ctx;
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003253 unsigned int size;
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003254 char comm[TASK_COMM_LEN];
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003255
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003256 memset(comm, 0, sizeof(comm));
3257 strncpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnar888fcee2009-04-09 09:48:22 +02003258 size = ALIGN(strlen(comm)+1, sizeof(u64));
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003259
3260 comm_event->comm = comm;
3261 comm_event->comm_size = size;
3262
3263 comm_event->event.header.size = sizeof(comm_event->event) + size;
3264
3265 cpuctx = &get_cpu_var(perf_cpu_context);
3266 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
3267 put_cpu_var(perf_cpu_context);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003268
3269 rcu_read_lock();
3270 /*
3271 * doesn't really matter which of the child contexts the
3272	 * event ends up in.
3273 */
3274 ctx = rcu_dereference(current->perf_counter_ctxp);
3275 if (ctx)
3276 perf_counter_comm_ctx(ctx, comm_event);
3277 rcu_read_unlock();
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003278}
3279
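/*
 * A worked example of the sizing above: for a task whose comm is "bash",
 * strlen()+1 is 5 and ALIGN(5, sizeof(u64)) is 8, so
 *
 *	header.size = sizeof(comm_event->event) + 8	   16 + 8 = 24 bytes
 *
 * and the string is emitted NUL padded to that 8 byte boundary, keeping
 * every record in the ring buffer u64 aligned.
 */
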
3280void perf_counter_comm(struct task_struct *task)
3281{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003282 struct perf_comm_event comm_event;
3283
Paul Mackerras57e79862009-06-30 16:07:19 +10003284 if (task->perf_counter_ctxp)
3285 perf_counter_enable_on_exec(task);
3286
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003287 if (!atomic_read(&nr_comm_counters))
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003288 return;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10003289
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003290 comm_event = (struct perf_comm_event){
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003291 .task = task,
Peter Zijlstra573402d2009-07-22 11:13:50 +02003292 /* .comm */
3293 /* .comm_size */
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003294 .event = {
Peter Zijlstra573402d2009-07-22 11:13:50 +02003295 .header = {
3296 .type = PERF_EVENT_COMM,
3297 .misc = 0,
3298 /* .size */
3299 },
3300 /* .pid */
3301 /* .tid */
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02003302 },
3303 };
3304
3305 perf_counter_comm_event(&comm_event);
3306}
3307
3308/*
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003309 * mmap tracking
3310 */
3311
3312struct perf_mmap_event {
Peter Zijlstra089dd792009-06-05 14:04:55 +02003313 struct vm_area_struct *vma;
3314
3315 const char *file_name;
3316 int file_size;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003317
3318 struct {
3319 struct perf_event_header header;
3320
3321 u32 pid;
3322 u32 tid;
3323 u64 start;
3324 u64 len;
3325 u64 pgoff;
3326 } event;
3327};
3328
3329static void perf_counter_mmap_output(struct perf_counter *counter,
3330 struct perf_mmap_event *mmap_event)
3331{
3332 struct perf_output_handle handle;
3333 int size = mmap_event->event.header.size;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003334 int ret = perf_output_begin(&handle, counter, size, 0, 0);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003335
3336 if (ret)
3337 return;
3338
Peter Zijlstra709e50c2009-06-02 14:13:15 +02003339 mmap_event->event.pid = perf_counter_pid(counter, current);
3340 mmap_event->event.tid = perf_counter_tid(counter, current);
3341
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003342 perf_output_put(&handle, mmap_event->event);
3343 perf_output_copy(&handle, mmap_event->file_name,
3344 mmap_event->file_size);
Peter Zijlstra78d613e2009-03-30 19:07:11 +02003345 perf_output_end(&handle);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003346}
3347
3348static int perf_counter_mmap_match(struct perf_counter *counter,
3349 struct perf_mmap_event *mmap_event)
3350{
Peter Zijlstrad99e9442009-06-04 17:08:58 +02003351 if (counter->attr.mmap)
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003352 return 1;
3353
3354 return 0;
3355}
3356
3357static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
3358 struct perf_mmap_event *mmap_event)
3359{
3360 struct perf_counter *counter;
3361
3362 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3363 return;
3364
3365 rcu_read_lock();
3366 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3367 if (perf_counter_mmap_match(counter, mmap_event))
3368 perf_counter_mmap_output(counter, mmap_event);
3369 }
3370 rcu_read_unlock();
3371}
3372
3373static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3374{
3375 struct perf_cpu_context *cpuctx;
Peter Zijlstra665c2142009-05-29 14:51:57 +02003376 struct perf_counter_context *ctx;
Peter Zijlstra089dd792009-06-05 14:04:55 +02003377 struct vm_area_struct *vma = mmap_event->vma;
3378 struct file *file = vma->vm_file;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003379 unsigned int size;
3380 char tmp[16];
3381 char *buf = NULL;
Peter Zijlstra089dd792009-06-05 14:04:55 +02003382 const char *name;
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003383
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003384 memset(tmp, 0, sizeof(tmp));
3385
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003386 if (file) {
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003387 /*
3388 * d_path works from the end of the buffer backwards, so we
3389 * need to add enough zero bytes after the string to handle
3390 * the 64bit alignment we do later.
3391 */
3392 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003393 if (!buf) {
3394 name = strncpy(tmp, "//enomem", sizeof(tmp));
3395 goto got_name;
3396 }
Peter Zijlstrad3d21c42009-04-09 10:53:46 +02003397 name = d_path(&file->f_path, buf, PATH_MAX);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003398 if (IS_ERR(name)) {
3399 name = strncpy(tmp, "//toolong", sizeof(tmp));
3400 goto got_name;
3401 }
3402 } else {
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003403 if (arch_vma_name(mmap_event->vma)) {
3404 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3405 sizeof(tmp));
Peter Zijlstra089dd792009-06-05 14:04:55 +02003406 goto got_name;
Anton Blanchard413ee3b2009-07-16 15:15:52 +02003407 }
Peter Zijlstra089dd792009-06-05 14:04:55 +02003408
3409 if (!vma->vm_mm) {
3410 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3411 goto got_name;
3412 }
3413
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003414 name = strncpy(tmp, "//anon", sizeof(tmp));
3415 goto got_name;
3416 }
3417
3418got_name:
Ingo Molnar888fcee2009-04-09 09:48:22 +02003419 size = ALIGN(strlen(name)+1, sizeof(u64));
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003420
3421 mmap_event->file_name = name;
3422 mmap_event->file_size = size;
3423
3424 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
3425
3426 cpuctx = &get_cpu_var(perf_cpu_context);
3427 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
3428 put_cpu_var(perf_cpu_context);
3429
Peter Zijlstra665c2142009-05-29 14:51:57 +02003430 rcu_read_lock();
3431 /*
3432	 * doesn't really matter which of the child contexts the
3433	 * event ends up in.
3434 */
3435 ctx = rcu_dereference(current->perf_counter_ctxp);
3436 if (ctx)
3437 perf_counter_mmap_ctx(ctx, mmap_event);
3438 rcu_read_unlock();
3439
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003440 kfree(buf);
3441}
3442
Peter Zijlstra089dd792009-06-05 14:04:55 +02003443void __perf_counter_mmap(struct vm_area_struct *vma)
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003444{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003445 struct perf_mmap_event mmap_event;
3446
Peter Zijlstra60313eb2009-06-04 16:53:44 +02003447 if (!atomic_read(&nr_mmap_counters))
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02003448 return;
3449
3450 mmap_event = (struct perf_mmap_event){
Peter Zijlstra089dd792009-06-05 14:04:55 +02003451 .vma = vma,
Peter Zijlstra573402d2009-07-22 11:13:50 +02003452 /* .file_name */
3453 /* .file_size */
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003454 .event = {
Peter Zijlstra573402d2009-07-22 11:13:50 +02003455 .header = {
3456 .type = PERF_EVENT_MMAP,
3457 .misc = 0,
3458 /* .size */
3459 },
3460 /* .pid */
3461 /* .tid */
Peter Zijlstra089dd792009-06-05 14:04:55 +02003462 .start = vma->vm_start,
3463 .len = vma->vm_end - vma->vm_start,
3464 .pgoff = vma->vm_pgoff,
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003465 },
3466 };
3467
3468 perf_counter_mmap_event(&mmap_event);
3469}
3470
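/*
 * Putting the above together, mapping a file-backed vma produces one
 * PERF_EVENT_MMAP record per counter that asked for attr.mmap:
 *
 *	struct {
 *		struct perf_event_header header;	    8 bytes
 *		u32 pid, tid;				   +8
 *		u64 start, len, pgoff;			   +24
 *		char filename[];			   +ALIGN(strlen+1, 8)
 *	};
 *
 * so for a 15 character path the filename part pads out to 16 bytes and
 * header.size ends up as 40 + 16 = 56.  Userspace tools can use these
 * records to map sampled IPs back to the dso they fall in.
 */
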
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02003471/*
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003472 * IRQ throttle logging
3473 */
3474
3475static void perf_log_throttle(struct perf_counter *counter, int enable)
3476{
3477 struct perf_output_handle handle;
3478 int ret;
3479
3480 struct {
3481 struct perf_event_header header;
3482 u64 time;
Peter Zijlstracca3f452009-06-11 14:57:55 +02003483 u64 id;
Peter Zijlstra7f453c22009-07-21 13:19:40 +02003484 u64 stream_id;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003485 } throttle_event = {
3486 .header = {
Anton Blanchard966ee4d2009-07-22 23:05:46 +10003487 .type = PERF_EVENT_THROTTLE,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003488 .misc = 0,
3489 .size = sizeof(throttle_event),
3490 },
Peter Zijlstra7f453c22009-07-21 13:19:40 +02003491 .time = sched_clock(),
3492 .id = primary_counter_id(counter),
3493 .stream_id = counter->id,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003494 };
3495
Anton Blanchard966ee4d2009-07-22 23:05:46 +10003496 if (enable)
3497 throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
3498
Ingo Molnar0127c3e2009-05-25 22:03:26 +02003499 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003500 if (ret)
3501 return;
3502
3503 perf_output_put(&handle, throttle_event);
3504 perf_output_end(&handle);
3505}
3506
3507/*
Peter Zijlstra43a21ea2009-03-25 19:39:37 +01003508 * Generic counter overflow handling, sampling.
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003509 */
3510
Peter Zijlstra850bc732009-09-17 18:47:11 +02003511static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003512 int throttle, struct perf_sample_data *data,
3513 struct pt_regs *regs)
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003514{
Peter Zijlstra79f14642009-04-06 11:45:07 +02003515 int events = atomic_read(&counter->event_limit);
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003516 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003517 int ret = 0;
3518
Peter Zijlstra850bc732009-09-17 18:47:11 +02003519 throttle = (throttle && counter->pmu->unthrottle != NULL);
3520
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003521 if (!throttle) {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003522 hwc->interrupts++;
Ingo Molnar128f0482009-06-03 22:19:36 +02003523 } else {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003524 if (hwc->interrupts != MAX_INTERRUPTS) {
3525 hwc->interrupts++;
Peter Zijlstradf58ab22009-06-11 11:25:05 +02003526 if (HZ * hwc->interrupts >
3527 (u64)sysctl_perf_counter_sample_rate) {
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003528 hwc->interrupts = MAX_INTERRUPTS;
Ingo Molnar128f0482009-06-03 22:19:36 +02003529 perf_log_throttle(counter, 0);
3530 ret = 1;
3531 }
3532 } else {
3533 /*
3534		 * Keep re-disabling the counter even though we disabled
3535		 * it on the previous pass - just in case we raced with a
3536		 * sched-in and the counter got enabled again:
3537 */
Peter Zijlstraa78ac322009-05-25 17:39:05 +02003538 ret = 1;
3539 }
3540 }
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003541
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02003542 if (counter->attr.freq) {
3543 u64 now = sched_clock();
3544 s64 delta = now - hwc->freq_stamp;
3545
3546 hwc->freq_stamp = now;
3547
3548 if (delta > 0 && delta < TICK_NSEC)
3549 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3550 }
3551
Peter Zijlstra2023b352009-05-05 17:50:26 +02003552 /*
3553 * XXX event_limit might not quite work as expected on inherited
3554 * counters
3555 */
3556
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003557 counter->pending_kill = POLL_IN;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003558 if (events && atomic_dec_and_test(&counter->event_limit)) {
3559 ret = 1;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02003560 counter->pending_kill = POLL_HUP;
Peter Zijlstra79f14642009-04-06 11:45:07 +02003561 if (nmi) {
3562 counter->pending_disable = 1;
3563 perf_pending_queue(&counter->pending,
3564 perf_pending_counter);
3565 } else
3566 perf_counter_disable(counter);
3567 }
3568
Markus Metzger5622f292009-09-15 13:00:23 +02003569 perf_counter_output(counter, nmi, data, regs);
Peter Zijlstra79f14642009-04-06 11:45:07 +02003570 return ret;
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003571}
3572
Peter Zijlstra850bc732009-09-17 18:47:11 +02003573int perf_counter_overflow(struct perf_counter *counter, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003574 struct perf_sample_data *data,
3575 struct pt_regs *regs)
Peter Zijlstra850bc732009-09-17 18:47:11 +02003576{
Markus Metzger5622f292009-09-15 13:00:23 +02003577 return __perf_counter_overflow(counter, nmi, 1, data, regs);
Peter Zijlstra850bc732009-09-17 18:47:11 +02003578}
3579
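/*
 * A worked example of the throttling check above, assuming HZ == 1000 and
 * the default sysctl_perf_counter_sample_rate of 100000: once a counter has
 * taken more than 100000 / 1000 == 100 interrupts within one tick, its
 * hwc->interrupts is pinned at MAX_INTERRUPTS, a PERF_EVENT_THROTTLE record
 * is logged and __perf_counter_overflow() keeps returning 1, so the pmu can
 * quiesce the interrupt source until the counter gets unthrottled again.
 */
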
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02003580/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003581 * Generic software counter infrastructure
3582 */
3583
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003584/*
3585 * We directly increment counter->count and keep a second value in
3586 * counter->hw.period_left to count intervals. This period counter
3587 * is kept in the range [-sample_period, 0] so that we can use the
3588 * sign as trigger.
3589 */
3590
3591static u64 perf_swcounter_set_period(struct perf_counter *counter)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003592{
3593 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003594 u64 period = hwc->last_period;
3595 u64 nr, offset;
3596 s64 old, val;
3597
3598 hwc->last_period = hwc->sample_period;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003599
3600again:
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003601 old = val = atomic64_read(&hwc->period_left);
3602 if (val < 0)
3603 return 0;
3604
3605 nr = div64_u64(period + val, period);
3606 offset = nr * period;
3607 val -= offset;
3608 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003609 goto again;
3610
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003611 return nr;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003612}
3613
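/*
 * A worked example of the arithmetic above, with hwc->sample_period == 100:
 * the swcounter path adds events into period_left until it turns
 * non-negative, say it has reached +30.  Then
 *
 *	nr     = div64_u64(100 + 30, 100)	== 1	   one overflow
 *	offset = nr * 100			== 100
 *	val    = 30 - 100			== -70	   new period_left
 *
 * i.e. one sample is generated and the counter is 70 events away from the
 * next one, which keeps the long-run sampling rate at one sample per period
 * even when a single update crosses the boundary by a large amount.
 */
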
3614static void perf_swcounter_overflow(struct perf_counter *counter,
Markus Metzger5622f292009-09-15 13:00:23 +02003615 int nmi, struct perf_sample_data *data,
3616 struct pt_regs *regs)
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003617{
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003618 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstra850bc732009-09-17 18:47:11 +02003619 int throttle = 0;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003620 u64 overflow;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02003621
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003622 data->period = counter->hw.last_period;
3623 overflow = perf_swcounter_set_period(counter);
3624
3625 if (hwc->interrupts == MAX_INTERRUPTS)
3626 return;
3627
3628 for (; overflow; overflow--) {
Markus Metzger5622f292009-09-15 13:00:23 +02003629 if (__perf_counter_overflow(counter, nmi, throttle,
3630 data, regs)) {
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003631 /*
3632 * We inhibit the overflow from happening when
3633 * hwc->interrupts == MAX_INTERRUPTS.
3634 */
3635 break;
3636 }
Peter Zijlstracf450a72009-09-18 12:18:14 +02003637 throttle = 1;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003638 }
3639}
3640
3641static void perf_swcounter_unthrottle(struct perf_counter *counter)
3642{
3643 /*
3644 * Nothing to do, we already reset hwc->interrupts.
3645 */
3646}
3647
3648static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
Markus Metzger5622f292009-09-15 13:00:23 +02003649 int nmi, struct perf_sample_data *data,
3650 struct pt_regs *regs)
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003651{
3652 struct hw_perf_counter *hwc = &counter->hw;
3653
3654 atomic64_add(nr, &counter->count);
3655
3656 if (!hwc->sample_period)
3657 return;
3658
Markus Metzger5622f292009-09-15 13:00:23 +02003659 if (!regs)
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003660 return;
3661
3662 if (!atomic64_add_negative(nr, &hwc->period_left))
Markus Metzger5622f292009-09-15 13:00:23 +02003663 perf_swcounter_overflow(counter, nmi, data, regs);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003664}
3665
Paul Mackerras880ca152009-06-01 17:49:14 +10003666static int perf_swcounter_is_counting(struct perf_counter *counter)
3667{
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003668 /*
3669 * The counter is active, we're good!
3670 */
Paul Mackerras880ca152009-06-01 17:49:14 +10003671 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3672 return 1;
3673
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003674 /*
3675 * The counter is off/error, not counting.
3676 */
Paul Mackerras880ca152009-06-01 17:49:14 +10003677 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3678 return 0;
3679
3680 /*
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003681	 * The counter is inactive; if the context is active
	3682	 * we're part of a group that didn't make it onto the 'pmu',
	3683	 * so we're not counting.
Paul Mackerras880ca152009-06-01 17:49:14 +10003684 */
Peter Zijlstrabcfc2602009-08-13 09:51:55 +02003685 if (counter->ctx->is_active)
3686 return 0;
3687
3688 /*
3689	 * We're inactive and the context is too: this means the
3690	 * task is scheduled out, so we're counting events that happen
3691	 * to us, like migration events.
3692 */
3693 return 1;
Paul Mackerras880ca152009-06-01 17:49:14 +10003694}
3695
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003696static int perf_swcounter_match(struct perf_counter *counter,
Peter Zijlstra1c432d82009-06-11 13:19:29 +02003697 enum perf_type_id type,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003698 u32 event, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003699{
Paul Mackerras880ca152009-06-01 17:49:14 +10003700 if (!perf_swcounter_is_counting(counter))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003701 return 0;
3702
Ingo Molnara21ca2c2009-06-06 09:58:57 +02003703 if (counter->attr.type != type)
3704 return 0;
3705 if (counter->attr.config != event)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003706 return 0;
3707
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003708 if (regs) {
Peter Zijlstra0d486962009-06-02 19:22:16 +02003709 if (counter->attr.exclude_user && user_mode(regs))
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003710 return 0;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003711
Peter Zijlstra0d486962009-06-02 19:22:16 +02003712 if (counter->attr.exclude_kernel && !user_mode(regs))
Paul Mackerras3f731ca2009-06-01 17:52:30 +10003713 return 0;
3714 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003715
3716 return 1;
3717}
3718
3719static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003720 enum perf_type_id type,
3721 u32 event, u64 nr, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003722 struct perf_sample_data *data,
3723 struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003724{
3725 struct perf_counter *counter;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003726
Peter Zijlstra01ef09d2009-03-19 20:26:11 +01003727 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003728 return;
3729
Peter Zijlstra592903c2009-03-13 12:21:36 +01003730 rcu_read_lock();
3731 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Markus Metzger5622f292009-09-15 13:00:23 +02003732 if (perf_swcounter_match(counter, type, event, regs))
3733 perf_swcounter_add(counter, nr, nmi, data, regs);
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003734 }
Peter Zijlstra592903c2009-03-13 12:21:36 +01003735 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003736}
3737
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003738static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3739{
3740 if (in_nmi())
3741 return &cpuctx->recursion[3];
3742
3743 if (in_irq())
3744 return &cpuctx->recursion[2];
3745
3746 if (in_softirq())
3747 return &cpuctx->recursion[1];
3748
3749 return &cpuctx->recursion[0];
3750}
3751
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003752static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3753 u64 nr, int nmi,
Markus Metzger5622f292009-09-15 13:00:23 +02003754 struct perf_sample_data *data,
3755 struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003756{
3757 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003758 int *recursion = perf_swcounter_recursion_context(cpuctx);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003759 struct perf_counter_context *ctx;
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003760
3761 if (*recursion)
3762 goto out;
3763
3764 (*recursion)++;
3765 barrier();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003766
Peter Zijlstra78f13e92009-04-08 15:01:33 +02003767 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
Markus Metzger5622f292009-09-15 13:00:23 +02003768 nr, nmi, data, regs);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003769 rcu_read_lock();
3770 /*
3771 * doesn't really matter which of the child contexts the
3772	 * event ends up in.
3773 */
3774 ctx = rcu_dereference(current->perf_counter_ctxp);
3775 if (ctx)
Markus Metzger5622f292009-09-15 13:00:23 +02003776 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs);
Peter Zijlstra665c2142009-05-29 14:51:57 +02003777 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003778
Peter Zijlstra96f6d442009-03-23 18:22:07 +01003779 barrier();
3780 (*recursion)--;
3781
3782out:
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003783 put_cpu_var(perf_cpu_context);
3784}
3785
Peter Zijlstraf29ac752009-06-19 18:27:26 +02003786void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3787 struct pt_regs *regs, u64 addr)
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003788{
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003789 struct perf_sample_data data = {
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003790 .addr = addr,
3791 };
3792
Markus Metzger5622f292009-09-15 13:00:23 +02003793 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi,
3794 &data, regs);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003795}
3796
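/*
 * A hedged usage sketch: callers elsewhere in the kernel report software
 * events through this entry point (usually via a perf_swcounter_event()
 * wrapper in the header).  A page fault handler, for instance, would do
 * something like
 *
 *	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * i.e. event id, number of occurrences, nmi == 0, the faulting registers
 * and the faulting address, which is what PERF_SAMPLE_ADDR samples report.
 */
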
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003797static void perf_swcounter_read(struct perf_counter *counter)
3798{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003799}
3800
3801static int perf_swcounter_enable(struct perf_counter *counter)
3802{
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003803 struct hw_perf_counter *hwc = &counter->hw;
3804
3805 if (hwc->sample_period) {
3806 hwc->last_period = hwc->sample_period;
3807 perf_swcounter_set_period(counter);
3808 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003809 return 0;
3810}
3811
3812static void perf_swcounter_disable(struct perf_counter *counter)
3813{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003814}
3815
Robert Richter4aeb0b42009-04-29 12:47:03 +02003816static const struct pmu perf_ops_generic = {
Peter Zijlstraac17dc82009-03-13 12:21:34 +01003817 .enable = perf_swcounter_enable,
3818 .disable = perf_swcounter_disable,
3819 .read = perf_swcounter_read,
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003820 .unthrottle = perf_swcounter_unthrottle,
Peter Zijlstraac17dc82009-03-13 12:21:34 +01003821};
3822
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003823/*
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003824 * hrtimer based swcounter callback
3825 */
3826
3827static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3828{
3829 enum hrtimer_restart ret = HRTIMER_RESTART;
3830 struct perf_sample_data data;
Markus Metzger5622f292009-09-15 13:00:23 +02003831 struct pt_regs *regs;
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003832 struct perf_counter *counter;
3833 u64 period;
3834
3835 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3836 counter->pmu->read(counter);
3837
3838 data.addr = 0;
Markus Metzger5622f292009-09-15 13:00:23 +02003839 regs = get_irq_regs();
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003840 /*
3841 * In case we exclude kernel IPs or are somehow not in interrupt
3842 * context, provide the next best thing, the user IP.
3843 */
Markus Metzger5622f292009-09-15 13:00:23 +02003844 if ((counter->attr.exclude_kernel || !regs) &&
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003845 !counter->attr.exclude_user)
Markus Metzger5622f292009-09-15 13:00:23 +02003846 regs = task_pt_regs(current);
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003847
Markus Metzger5622f292009-09-15 13:00:23 +02003848 if (regs) {
3849 if (perf_counter_overflow(counter, 0, &data, regs))
Peter Zijlstra7b4b6652009-07-22 09:29:32 +02003850 ret = HRTIMER_NORESTART;
3851 }
3852
3853 period = max_t(u64, 10000, counter->hw.sample_period);
3854 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3855
3856 return ret;
3857}
3858
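/*
 * A sizing note on the handler above: the interval is clamped with
 *
 *	period = max_t(u64, 10000, counter->hw.sample_period);
 *
 * so the hrtimer based software counters (cpu-clock, task-clock below)
 * never fire more often than once every 10us, however small a
 * sample_period userspace requested.
 */
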
3859/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003860 * Software counter: cpu wall time clock
3861 */
3862
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003863static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3864{
3865 int cpu = raw_smp_processor_id();
3866 s64 prev;
3867 u64 now;
3868
3869 now = cpu_clock(cpu);
3870 prev = atomic64_read(&counter->hw.prev_count);
3871 atomic64_set(&counter->hw.prev_count, now);
3872 atomic64_add(now - prev, &counter->count);
3873}
3874
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003875static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3876{
3877 struct hw_perf_counter *hwc = &counter->hw;
3878 int cpu = raw_smp_processor_id();
3879
3880 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01003881 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3882 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003883 if (hwc->sample_period) {
3884 u64 period = max_t(u64, 10000, hwc->sample_period);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003885 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003886 ns_to_ktime(period), 0,
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003887 HRTIMER_MODE_REL, 0);
3888 }
3889
3890 return 0;
3891}
3892
Ingo Molnar5c92d122008-12-11 13:21:10 +01003893static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3894{
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003895 if (counter->hw.sample_period)
Peter Zijlstrab986d7e2009-05-20 12:21:21 +02003896 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003897 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01003898}
3899
3900static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3901{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11003902 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01003903}
3904
Robert Richter4aeb0b42009-04-29 12:47:03 +02003905static const struct pmu perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01003906 .enable = cpu_clock_perf_counter_enable,
3907 .disable = cpu_clock_perf_counter_disable,
3908 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01003909};
3910
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01003911/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01003912 * Software counter: task time clock
3913 */
3914
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003915static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
Ingo Molnarbae43c92008-12-11 14:03:20 +01003916{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003917 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003918 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01003919
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003920 prev = atomic64_xchg(&counter->hw.prev_count, now);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003921 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003922 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01003923}
3924
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01003925static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003926{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003927 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003928 u64 now;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003929
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02003930 now = counter->ctx->time;
3931
3932 atomic64_set(&hwc->prev_count, now);
Peter Zijlstra039fc912009-03-13 16:43:47 +01003933 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3934 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003935 if (hwc->sample_period) {
3936 u64 period = max_t(u64, 10000, hwc->sample_period);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003937 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstra60db5e02009-05-15 15:19:28 +02003938 ns_to_ktime(period), 0,
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003939 HRTIMER_MODE_REL, 0);
3940 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01003941
3942 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01003943}
3944
3945static void task_clock_perf_counter_disable(struct perf_counter *counter)
3946{
Peter Zijlstrab23f3322009-06-02 15:13:03 +02003947 if (counter->hw.sample_period)
Peter Zijlstrab986d7e2009-05-20 12:21:21 +02003948 hrtimer_cancel(&counter->hw.hrtimer);
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003949 task_clock_perf_counter_update(counter, counter->ctx->time);
3950
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003951}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01003952
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01003953static void task_clock_perf_counter_read(struct perf_counter *counter)
3954{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02003955 u64 time;
3956
3957 if (!in_nmi()) {
3958 update_context_time(counter->ctx);
3959 time = counter->ctx->time;
3960 } else {
3961 u64 now = perf_clock();
3962 u64 delta = now - counter->ctx->timestamp;
3963 time = counter->ctx->time + delta;
3964 }
3965
3966 task_clock_perf_counter_update(counter, time);
Ingo Molnarbae43c92008-12-11 14:03:20 +01003967}
3968
Robert Richter4aeb0b42009-04-29 12:47:03 +02003969static const struct pmu perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01003970 .enable = task_clock_perf_counter_enable,
3971 .disable = task_clock_perf_counter_disable,
3972 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01003973};
3974
Peter Zijlstrae077df42009-03-19 20:26:17 +01003975#ifdef CONFIG_EVENT_PROFILE
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003976void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3977 int entry_size)
Peter Zijlstrae077df42009-03-19 20:26:17 +01003978{
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003979 struct perf_raw_record raw = {
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003980 .size = entry_size,
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003981 .data = record,
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02003982 };
3983
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003984 struct perf_sample_data data = {
Peter Zijlstra3a659302009-07-21 17:34:57 +02003985 .addr = addr,
Frederic Weisbecker3a43ce62009-08-08 04:26:37 +02003986 .raw = &raw,
Peter Zijlstra92bf3092009-06-19 18:11:53 +02003987 };
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003988
Markus Metzger5622f292009-09-15 13:00:23 +02003989 struct pt_regs *regs = get_irq_regs();
Peter Zijlstrab8e83512009-03-19 20:26:18 +01003990
Markus Metzger5622f292009-09-15 13:00:23 +02003991 if (!regs)
3992 regs = task_pt_regs(current);
3993
3994 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3995 &data, regs);
Peter Zijlstrae077df42009-03-19 20:26:17 +01003996}
Steven Whitehouseff7b1b42009-04-15 16:55:05 +01003997EXPORT_SYMBOL_GPL(perf_tpcounter_event);
Peter Zijlstrae077df42009-03-19 20:26:17 +01003998
3999extern int ftrace_profile_enable(int);
4000extern void ftrace_profile_disable(int);
4001
4002static void tp_perf_counter_destroy(struct perf_counter *counter)
4003{
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01004004 ftrace_profile_disable(counter->attr.config);
Peter Zijlstrae077df42009-03-19 20:26:17 +01004005}
4006
Robert Richter4aeb0b42009-04-29 12:47:03 +02004007static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01004008{
Peter Zijlstraa4e95fc2009-08-10 11:20:12 +02004009 /*
4010	 * Raw tracepoint data is a severe data leak; only allow root to
4011 * have these.
4012 */
4013 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
Ingo Molnar0fbdea12009-09-02 21:46:00 +02004014 perf_paranoid_tracepoint_raw() &&
Peter Zijlstraa4e95fc2009-08-10 11:20:12 +02004015 !capable(CAP_SYS_ADMIN))
4016 return ERR_PTR(-EPERM);
4017
Chris Wilsond4d7d0b2009-07-06 09:31:33 +01004018 if (ftrace_profile_enable(counter->attr.config))
Peter Zijlstrae077df42009-03-19 20:26:17 +01004019 return NULL;
4020
4021 counter->destroy = tp_perf_counter_destroy;
4022
4023 return &perf_ops_generic;
4024}
4025#else
Robert Richter4aeb0b42009-04-29 12:47:03 +02004026static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01004027{
4028 return NULL;
4029}
4030#endif
4031
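/*
 * A hedged userspace sketch of how the tracepoint path above gets used:
 * attr.config carries the ftrace event id (as found under
 * /sys/kernel/debug/tracing/events/.../id), e.g.
 *
 *	struct perf_counter_attr attr = {
 *		.type		= PERF_TYPE_TRACEPOINT,
 *		.config		= event_id,		   id read from debugfs
 *		.sample_period	= 1,
 *		.sample_type	= PERF_SAMPLE_RAW,	   privileged, see above
 *	};
 *
 * tp_perf_counter_init() then turns on ftrace profiling for that id and
 * routes the events through the generic software pmu.
 */
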
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004032atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
4033
4034static void sw_perf_counter_destroy(struct perf_counter *counter)
4035{
4036 u64 event = counter->attr.config;
4037
Peter Zijlstraf3440112009-06-22 13:58:35 +02004038 WARN_ON(counter->parent);
4039
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004040 atomic_dec(&perf_swcounter_enabled[event]);
4041}
4042
Robert Richter4aeb0b42009-04-29 12:47:03 +02004043static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
Ingo Molnar5c92d122008-12-11 13:21:10 +01004044{
Robert Richter4aeb0b42009-04-29 12:47:03 +02004045 const struct pmu *pmu = NULL;
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004046 u64 event = counter->attr.config;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004047
Paul Mackerras0475f9e2009-02-11 14:35:35 +11004048 /*
4049 * Software counters (currently) can't in general distinguish
4050 * between user, kernel and hypervisor events.
4051 * However, context switches and cpu migrations are considered
4052 * to be kernel events, and page faults are never hypervisor
4053 * events.
4054 */
Peter Zijlstraf29ac752009-06-19 18:27:26 +02004055 switch (event) {
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02004056 case PERF_COUNT_SW_CPU_CLOCK:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004057 pmu = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01004058
Ingo Molnar5c92d122008-12-11 13:21:10 +01004059 break;
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02004060 case PERF_COUNT_SW_TASK_CLOCK:
Paul Mackerras23a185c2009-02-09 22:42:47 +11004061 /*
4062 * If the user instantiates this as a per-cpu counter,
4063 * use the cpu_clock counter instead.
4064 */
4065 if (counter->ctx->task)
Robert Richter4aeb0b42009-04-29 12:47:03 +02004066 pmu = &perf_ops_task_clock;
Paul Mackerras23a185c2009-02-09 22:42:47 +11004067 else
Robert Richter4aeb0b42009-04-29 12:47:03 +02004068 pmu = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01004069
Ingo Molnarbae43c92008-12-11 14:03:20 +01004070 break;
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +02004071 case PERF_COUNT_SW_PAGE_FAULTS:
4072 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4073 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4074 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4075 case PERF_COUNT_SW_CPU_MIGRATIONS:
Peter Zijlstraf3440112009-06-22 13:58:35 +02004076 if (!counter->parent) {
4077 atomic_inc(&perf_swcounter_enabled[event]);
4078 counter->destroy = sw_perf_counter_destroy;
4079 }
Paul Mackerras3f731ca2009-06-01 17:52:30 +10004080 pmu = &perf_ops_generic;
Ingo Molnar6c594c22008-12-14 12:34:15 +01004081 break;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004082 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01004083
Robert Richter4aeb0b42009-04-29 12:47:03 +02004084 return pmu;
Ingo Molnar5c92d122008-12-11 13:21:10 +01004085}
4086
Thomas Gleixner0793a612008-12-04 20:12:29 +01004087/*
4088 * Allocate and initialize a counter structure
4089 */
4090static struct perf_counter *
Peter Zijlstra0d486962009-06-02 19:22:16 +02004091perf_counter_alloc(struct perf_counter_attr *attr,
Ingo Molnar04289bb2008-12-11 08:38:42 +01004092 int cpu,
Paul Mackerras23a185c2009-02-09 22:42:47 +11004093 struct perf_counter_context *ctx,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004094 struct perf_counter *group_leader,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004095 struct perf_counter *parent_counter,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004096 gfp_t gfpflags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004097{
Robert Richter4aeb0b42009-04-29 12:47:03 +02004098 const struct pmu *pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01004099 struct perf_counter *counter;
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004100 struct hw_perf_counter *hwc;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004101 long err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004102
Ingo Molnar9b51f662008-12-12 13:49:45 +01004103 counter = kzalloc(sizeof(*counter), gfpflags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004104 if (!counter)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004105 return ERR_PTR(-ENOMEM);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004106
Ingo Molnar04289bb2008-12-11 08:38:42 +01004107 /*
4108 * Single counters are their own group leaders, with an
4109 * empty sibling list:
4110 */
4111 if (!group_leader)
4112 group_leader = counter;
4113
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004114 mutex_init(&counter->child_mutex);
4115 INIT_LIST_HEAD(&counter->child_list);
4116
Ingo Molnar04289bb2008-12-11 08:38:42 +01004117 INIT_LIST_HEAD(&counter->list_entry);
Peter Zijlstra592903c2009-03-13 12:21:36 +01004118 INIT_LIST_HEAD(&counter->event_entry);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004119 INIT_LIST_HEAD(&counter->sibling_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004120 init_waitqueue_head(&counter->waitq);
4121
Peter Zijlstra7b732a72009-03-23 18:22:10 +01004122 mutex_init(&counter->mmap_mutex);
4123
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004124 counter->cpu = cpu;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004125 counter->attr = *attr;
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004126 counter->group_leader = group_leader;
4127 counter->pmu = NULL;
4128 counter->ctx = ctx;
4129 counter->oncpu = -1;
Ingo Molnar329d8762009-05-26 08:10:00 +02004130
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004131 counter->parent = parent_counter;
4132
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004133 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
4134 counter->id = atomic64_inc_return(&perf_counter_id);
4135
4136 counter->state = PERF_COUNTER_STATE_INACTIVE;
4137
Peter Zijlstra0d486962009-06-02 19:22:16 +02004138 if (attr->disabled)
Ingo Molnara86ed502008-12-17 00:43:10 +01004139 counter->state = PERF_COUNTER_STATE_OFF;
4140
Robert Richter4aeb0b42009-04-29 12:47:03 +02004141 pmu = NULL;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004142
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004143 hwc = &counter->hw;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004144 hwc->sample_period = attr->sample_period;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004145 if (attr->freq && attr->sample_freq)
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004146 hwc->sample_period = 1;
Peter Zijlstraeced1df2009-08-28 17:10:47 +02004147 hwc->last_period = hwc->sample_period;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004148
4149 atomic64_set(&hwc->period_left, hwc->sample_period);
Peter Zijlstra60db5e02009-05-15 15:19:28 +02004150
Peter Zijlstra2023b352009-05-05 17:50:26 +02004151 /*
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02004152 * we currently do not support PERF_FORMAT_GROUP on inherited counters
Peter Zijlstra2023b352009-05-05 17:50:26 +02004153 */
Peter Zijlstra3dab77f2009-08-13 11:47:53 +02004154 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
Peter Zijlstra2023b352009-05-05 17:50:26 +02004155 goto done;
4156
Ingo Molnara21ca2c2009-06-06 09:58:57 +02004157 switch (attr->type) {
Peter Zijlstra081fad82009-06-11 17:57:21 +02004158 case PERF_TYPE_RAW:
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004159 case PERF_TYPE_HARDWARE:
Ingo Molnar8326f442009-06-05 20:22:46 +02004160 case PERF_TYPE_HW_CACHE:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004161 pmu = hw_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004162 break;
4163
4164 case PERF_TYPE_SOFTWARE:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004165 pmu = sw_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004166 break;
4167
4168 case PERF_TYPE_TRACEPOINT:
Robert Richter4aeb0b42009-04-29 12:47:03 +02004169 pmu = tp_perf_counter_init(counter);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004170 break;
Peter Zijlstra974802e2009-06-12 12:46:55 +02004171
4172 default:
4173 break;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01004174 }
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01004175done:
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004176 err = 0;
Robert Richter4aeb0b42009-04-29 12:47:03 +02004177 if (!pmu)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004178 err = -EINVAL;
Robert Richter4aeb0b42009-04-29 12:47:03 +02004179 else if (IS_ERR(pmu))
4180 err = PTR_ERR(pmu);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004181
4182 if (err) {
Peter Zijlstraa96bbc12009-06-03 14:01:36 +02004183 if (counter->ns)
4184 put_pid_ns(counter->ns);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004185 kfree(counter);
4186 return ERR_PTR(err);
4187 }
4188
Robert Richter4aeb0b42009-04-29 12:47:03 +02004189 counter->pmu = pmu;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004190
Peter Zijlstraf3440112009-06-22 13:58:35 +02004191 if (!counter->parent) {
4192 atomic_inc(&nr_counters);
4193 if (counter->attr.mmap)
4194 atomic_inc(&nr_mmap_counters);
4195 if (counter->attr.comm)
4196 atomic_inc(&nr_comm_counters);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004197 if (counter->attr.task)
4198 atomic_inc(&nr_task_counters);
Peter Zijlstraf3440112009-06-22 13:58:35 +02004199 }
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02004200
Thomas Gleixner0793a612008-12-04 20:12:29 +01004201 return counter;
4202}
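
/*
 * Illustrative userspace sketch (not part of this file): the two sampling
 * modes handled by perf_counter_alloc() above.  With attr.freq set,
 * attr.sample_freq is a target rate in Hz and the kernel starts from a
 * period of 1 and adapts it; with attr.freq clear, attr.sample_period is a
 * fixed number of events between samples.  The helper name is made up.
 */
#include <string.h>
#include <linux/perf_counter.h>

static void init_freq_sampling(struct perf_counter_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size        = sizeof(*attr);
	attr->type        = PERF_TYPE_HARDWARE;
	attr->config      = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_type = PERF_SAMPLE_IP;
	attr->freq        = 1;
	attr->sample_freq = 1000;	/* aim for roughly 1000 samples/sec */
}

/* fixed-period alternative: attr->freq = 0; attr->sample_period = 100000; */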
4203
Peter Zijlstra974802e2009-06-12 12:46:55 +02004204static int perf_copy_attr(struct perf_counter_attr __user *uattr,
4205 struct perf_counter_attr *attr)
4206{
4207 int ret;
4208 u32 size;
4209
4210 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4211 return -EFAULT;
4212
4213 /*
4214	 * zero the full structure, so that a short copy leaves the rest zeroed.
4215 */
4216 memset(attr, 0, sizeof(*attr));
4217
4218 ret = get_user(size, &uattr->size);
4219 if (ret)
4220 return ret;
4221
4222 if (size > PAGE_SIZE) /* silly large */
4223 goto err_size;
4224
4225 if (!size) /* abi compat */
4226 size = PERF_ATTR_SIZE_VER0;
4227
4228 if (size < PERF_ATTR_SIZE_VER0)
4229 goto err_size;
4230
4231 /*
4232 * If we're handed a bigger struct than we know of,
4233 * ensure all the unknown bits are 0.
4234 */
4235 if (size > sizeof(*attr)) {
4236 unsigned long val;
4237 unsigned long __user *addr;
4238 unsigned long __user *end;
4239
4240 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
4241 sizeof(unsigned long));
4242 end = PTR_ALIGN((void __user *)uattr + size,
4243 sizeof(unsigned long));
4244
4245 for (; addr < end; addr += sizeof(unsigned long)) {
4246 ret = get_user(val, addr);
4247 if (ret)
4248 return ret;
4249 if (val)
4250 goto err_size;
4251 }
4252 }
4253
4254 ret = copy_from_user(attr, uattr, size);
4255 if (ret)
4256 return -EFAULT;
4257
4258 /*
4259	 * If the type is valid, the type-specific counter init will verify
4260	 * attr->config.
4261 */
4262 if (attr->type >= PERF_TYPE_MAX)
4263 return -EINVAL;
4264
4265 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
4266 return -EINVAL;
4267
4268 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4269 return -EINVAL;
4270
4271 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4272 return -EINVAL;
4273
4274out:
4275 return ret;
4276
4277err_size:
4278 put_user(sizeof(*attr), &uattr->size);
4279 ret = -E2BIG;
4280 goto out;
4281}
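
/*
 * Illustrative userspace sketch (not part of this file): what perf_copy_attr()
 * above expects from callers.  An older binary may pass a smaller struct
 * (size >= PERF_ATTR_SIZE_VER0) and the missing tail reads as zero; a newer
 * binary on an older kernel may pass a bigger struct, but every byte beyond
 * what the kernel knows must be zero, otherwise the call fails with E2BIG
 * and the kernel writes its own sizeof(attr) back into uattr->size.  The
 * helper name is made up.
 */
#include <string.h>
#include <linux/perf_counter.h>

static void init_attr(struct perf_counter_attr *attr)
{
	/* zero everything first so any tail the kernel doesn't know is 0 */
	memset(attr, 0, sizeof(*attr));
	/* tell the kernel which ABI revision of the struct we are using */
	attr->size = sizeof(*attr);
}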
4282
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004283int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4284{
4285 struct perf_counter *output_counter = NULL;
4286 struct file *output_file = NULL;
4287 struct perf_counter *old_output;
4288 int fput_needed = 0;
4289 int ret = -EINVAL;
4290
4291 if (!output_fd)
4292 goto set;
4293
4294 output_file = fget_light(output_fd, &fput_needed);
4295 if (!output_file)
4296 return -EBADF;
4297
4298 if (output_file->f_op != &perf_fops)
4299 goto out;
4300
4301 output_counter = output_file->private_data;
4302
4303 /* Don't chain output fds */
4304 if (output_counter->output)
4305 goto out;
4306
4307 /* Don't set an output fd when we already have an output channel */
4308 if (counter->data)
4309 goto out;
4310
4311 atomic_long_inc(&output_file->f_count);
4312
4313set:
4314 mutex_lock(&counter->mmap_mutex);
4315 old_output = counter->output;
4316 rcu_assign_pointer(counter->output, output_counter);
4317 mutex_unlock(&counter->mmap_mutex);
4318
4319 if (old_output) {
4320 /*
4321 * we need to make sure no existing perf_output_*()
4322 * is still referencing this counter.
4323 */
4324 synchronize_rcu();
4325 fput(old_output->filp);
4326 }
4327
4328 ret = 0;
4329out:
4330 fput_light(output_file, fput_needed);
4331 return ret;
4332}
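
/*
 * Illustrative userspace sketch (not part of this file): redirecting a new
 * counter's output into an existing counter's mmap buffer via
 * PERF_FLAG_FD_OUTPUT, which ends up in perf_counter_set_output() above.
 * The target must itself be a perf counter fd, must not redirect elsewhere,
 * and the new counter must not have mmap'ed a buffer of its own.  The helper
 * name is made up; the syscall number macro is assumed to exist.
 */
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int open_into(struct perf_counter_attr *attr, pid_t pid, int cpu,
		     int output_fd)
{
	/*
	 * group_fd doubles as the output fd when PERF_FLAG_FD_OUTPUT is set
	 * (see sys_perf_counter_open() below), so the new counter is also
	 * grouped under output_fd's counter unless PERF_FLAG_FD_NO_GROUP is
	 * passed as well.
	 */
	return syscall(__NR_perf_counter_open, attr, pid, cpu, output_fd,
		       PERF_FLAG_FD_OUTPUT);
}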
4333
Thomas Gleixner0793a612008-12-04 20:12:29 +01004334/**
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004335 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
Ingo Molnar9f66a382008-12-10 12:33:23 +01004336 *
Peter Zijlstra0d486962009-06-02 19:22:16 +02004337 * @attr_uptr: event type attributes for monitoring/sampling
Thomas Gleixner0793a612008-12-04 20:12:29 +01004338 * @pid: target pid
Ingo Molnar9f66a382008-12-10 12:33:23 +01004339 * @cpu: target cpu
4340 * @group_fd: group leader counter fd
Thomas Gleixner0793a612008-12-04 20:12:29 +01004341 */
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004342SYSCALL_DEFINE5(perf_counter_open,
Peter Zijlstra974802e2009-06-12 12:46:55 +02004343 struct perf_counter_attr __user *, attr_uptr,
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004344 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004345{
Ingo Molnar04289bb2008-12-11 08:38:42 +01004346 struct perf_counter *counter, *group_leader;
Peter Zijlstra0d486962009-06-02 19:22:16 +02004347 struct perf_counter_attr attr;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004348 struct perf_counter_context *ctx;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004349 struct file *counter_file = NULL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004350 struct file *group_file = NULL;
4351 int fput_needed = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004352 int fput_needed2 = 0;
Ingo Molnardc86cab2009-09-03 18:03:00 +02004353 int err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004354
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004355 /* for future expandability... */
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004356 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
Paul Mackerras2743a5b2009-03-04 20:36:51 +11004357 return -EINVAL;
4358
Ingo Molnardc86cab2009-09-03 18:03:00 +02004359 err = perf_copy_attr(attr_uptr, &attr);
4360 if (err)
4361 return err;
Thomas Gleixnereab656a2008-12-08 19:26:59 +01004362
Peter Zijlstra07647712009-06-11 11:18:36 +02004363 if (!attr.exclude_kernel) {
4364 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4365 return -EACCES;
4366 }
4367
Peter Zijlstradf58ab22009-06-11 11:25:05 +02004368 if (attr.freq) {
4369 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
4370 return -EINVAL;
4371 }
4372
Ingo Molnar04289bb2008-12-11 08:38:42 +01004373 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01004374 * Get the target context (task or percpu):
4375 */
4376 ctx = find_get_context(pid, cpu);
4377 if (IS_ERR(ctx))
4378 return PTR_ERR(ctx);
4379
4380 /*
4381 * Look up the group leader (we will attach this counter to it):
Ingo Molnar04289bb2008-12-11 08:38:42 +01004382 */
4383 group_leader = NULL;
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004384 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
Ingo Molnardc86cab2009-09-03 18:03:00 +02004385 err = -EINVAL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004386 group_file = fget_light(group_fd, &fput_needed);
4387 if (!group_file)
Ingo Molnarccff2862008-12-11 11:26:29 +01004388 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004389 if (group_file->f_op != &perf_fops)
Ingo Molnarccff2862008-12-11 11:26:29 +01004390 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004391
4392 group_leader = group_file->private_data;
4393 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01004394 * Do not allow a recursive hierarchy (this new sibling
4395 * becoming part of another group-sibling):
Ingo Molnar04289bb2008-12-11 08:38:42 +01004396 */
Ingo Molnarccff2862008-12-11 11:26:29 +01004397 if (group_leader->group_leader != group_leader)
4398 goto err_put_context;
4399 /*
4400	 * Do not allow attaching to a group in a different
4401 * task or CPU context:
4402 */
4403 if (group_leader->ctx != ctx)
4404 goto err_put_context;
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11004405 /*
4406 * Only a group leader can be exclusive or pinned
4407 */
Peter Zijlstra0d486962009-06-02 19:22:16 +02004408 if (attr.exclusive || attr.pinned)
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11004409 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01004410 }
4411
Peter Zijlstra0d486962009-06-02 19:22:16 +02004412 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004413 NULL, GFP_KERNEL);
Ingo Molnardc86cab2009-09-03 18:03:00 +02004414 err = PTR_ERR(counter);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004415 if (IS_ERR(counter))
Thomas Gleixner0793a612008-12-04 20:12:29 +01004416 goto err_put_context;
4417
Ingo Molnardc86cab2009-09-03 18:03:00 +02004418 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
4419 if (err < 0)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004420 goto err_free_put_context;
4421
Ingo Molnardc86cab2009-09-03 18:03:00 +02004422 counter_file = fget_light(err, &fput_needed2);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004423 if (!counter_file)
4424 goto err_free_put_context;
4425
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004426 if (flags & PERF_FLAG_FD_OUTPUT) {
Ingo Molnardc86cab2009-09-03 18:03:00 +02004427 err = perf_counter_set_output(counter, group_fd);
4428 if (err)
4429 goto err_fput_free_put_context;
Peter Zijlstraa4be7c22009-08-19 11:18:27 +02004430 }
4431
Ingo Molnar9b51f662008-12-12 13:49:45 +01004432 counter->filp = counter_file;
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004433 WARN_ON_ONCE(ctx->parent_ctx);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004434 mutex_lock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004435 perf_install_in_context(ctx, counter, cpu);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004436 ++ctx->generation;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004437 mutex_unlock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004438
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02004439 counter->owner = current;
4440 get_task_struct(current);
4441 mutex_lock(&current->perf_counter_mutex);
4442 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
4443 mutex_unlock(&current->perf_counter_mutex);
4444
Ingo Molnardc86cab2009-09-03 18:03:00 +02004445err_fput_free_put_context:
Ingo Molnar9b51f662008-12-12 13:49:45 +01004446 fput_light(counter_file, fput_needed2);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004447
Ingo Molnar9b51f662008-12-12 13:49:45 +01004448err_free_put_context:
Ingo Molnardc86cab2009-09-03 18:03:00 +02004449 if (err < 0)
4450 kfree(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004451
4452err_put_context:
Ingo Molnardc86cab2009-09-03 18:03:00 +02004453 if (err < 0)
4454 put_ctx(ctx);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004455
Ingo Molnardc86cab2009-09-03 18:03:00 +02004456 fput_light(group_file, fput_needed);
4457
4458 return err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004459}
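
/*
 * Illustrative userspace sketch (not part of this file): creating a counter
 * group with the syscall above.  The first counter is opened with
 * group_fd == -1 and becomes its own group leader; siblings pass the
 * leader's fd and must live in the same task/CPU context.  The helper name
 * is made up; the syscall number macro is assumed to exist.
 */
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int open_counter(__u32 type, __u64 config, pid_t pid, int cpu,
			int group_fd)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = type;
	attr.config = config;

	return syscall(__NR_perf_counter_open, &attr, pid, cpu, group_fd, 0);
}

/*
 * Possible usage:
 *	leader  = open_counter(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES,
 *			       0, -1, -1);
 *	sibling = open_counter(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS,
 *			       0, -1, leader);
 */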
4460
Ingo Molnar9b51f662008-12-12 13:49:45 +01004461/*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004462 * inherit a counter from parent task to child task:
4463 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004464static struct perf_counter *
Ingo Molnar9b51f662008-12-12 13:49:45 +01004465inherit_counter(struct perf_counter *parent_counter,
4466 struct task_struct *parent,
4467 struct perf_counter_context *parent_ctx,
4468 struct task_struct *child,
Paul Mackerrasd859e292009-01-17 18:10:22 +11004469 struct perf_counter *group_leader,
Ingo Molnar9b51f662008-12-12 13:49:45 +01004470 struct perf_counter_context *child_ctx)
4471{
4472 struct perf_counter *child_counter;
4473
Paul Mackerrasd859e292009-01-17 18:10:22 +11004474 /*
4475 * Instead of creating recursive hierarchies of counters,
4476 * we link inherited counters back to the original parent,
4477	 * which is guaranteed to have a filp, and use its file refcount
4478	 * as the reference count:
4479 */
4480 if (parent_counter->parent)
4481 parent_counter = parent_counter->parent;
4482
Peter Zijlstra0d486962009-06-02 19:22:16 +02004483 child_counter = perf_counter_alloc(&parent_counter->attr,
Paul Mackerras23a185c2009-02-09 22:42:47 +11004484 parent_counter->cpu, child_ctx,
Peter Zijlstrab84fbc92009-06-22 13:57:40 +02004485 group_leader, parent_counter,
4486 GFP_KERNEL);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004487 if (IS_ERR(child_counter))
4488 return child_counter;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004489 get_ctx(child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004490
4491 /*
Paul Mackerras564c2b22009-05-22 14:27:22 +10004492 * Make the child state follow the state of the parent counter,
Peter Zijlstra0d486962009-06-02 19:22:16 +02004493 * not its attr.disabled bit. We hold the parent's mutex,
Ingo Molnar22a4f652009-06-01 10:13:37 +02004494 * so we won't race with perf_counter_{en, dis}able_family.
Paul Mackerras564c2b22009-05-22 14:27:22 +10004495 */
4496 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
4497 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
4498 else
4499 child_counter->state = PERF_COUNTER_STATE_OFF;
4500
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02004501 if (parent_counter->attr.freq)
4502 child_counter->hw.sample_period = parent_counter->hw.sample_period;
4503
Paul Mackerras564c2b22009-05-22 14:27:22 +10004504 /*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004505 * Link it up in the child's context:
4506 */
Paul Mackerras53cfbf52009-03-25 22:46:58 +11004507 add_counter_to_ctx(child_counter, child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004508
Ingo Molnar9b51f662008-12-12 13:49:45 +01004509 /*
4510 * Get a reference to the parent filp - we will fput it
4511 * when the child counter exits. This is safe to do because
4512 * we are in the parent and we know that the filp still
4513 * exists and has a nonzero count:
4514 */
4515 atomic_long_inc(&parent_counter->filp->f_count);
4516
Paul Mackerrasd859e292009-01-17 18:10:22 +11004517 /*
4518 * Link this into the parent counter's child list
4519 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004520 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004521 mutex_lock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004522 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004523 mutex_unlock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004524
4525 return child_counter;
4526}
4527
4528static int inherit_group(struct perf_counter *parent_counter,
4529 struct task_struct *parent,
4530 struct perf_counter_context *parent_ctx,
4531 struct task_struct *child,
4532 struct perf_counter_context *child_ctx)
4533{
4534 struct perf_counter *leader;
4535 struct perf_counter *sub;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004536 struct perf_counter *child_ctr;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004537
4538 leader = inherit_counter(parent_counter, parent, parent_ctx,
4539 child, NULL, child_ctx);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004540 if (IS_ERR(leader))
4541 return PTR_ERR(leader);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004542 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02004543 child_ctr = inherit_counter(sub, parent, parent_ctx,
4544 child, leader, child_ctx);
4545 if (IS_ERR(child_ctr))
4546 return PTR_ERR(child_ctr);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004547 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004548 return 0;
4549}
4550
Paul Mackerrasd859e292009-01-17 18:10:22 +11004551static void sync_child_counter(struct perf_counter *child_counter,
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004552 struct task_struct *child)
Paul Mackerrasd859e292009-01-17 18:10:22 +11004553{
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004554 struct perf_counter *parent_counter = child_counter->parent;
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004555 u64 child_val;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004556
Peter Zijlstrabfbd3382009-06-24 21:11:59 +02004557 if (child_counter->attr.inherit_stat)
4558 perf_counter_read_event(child_counter, child);
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004559
Paul Mackerrasd859e292009-01-17 18:10:22 +11004560 child_val = atomic64_read(&child_counter->count);
4561
4562 /*
4563 * Add back the child's count to the parent's count:
4564 */
4565 atomic64_add(child_val, &parent_counter->count);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11004566 atomic64_add(child_counter->total_time_enabled,
4567 &parent_counter->child_total_time_enabled);
4568 atomic64_add(child_counter->total_time_running,
4569 &parent_counter->child_total_time_running);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004570
4571 /*
4572 * Remove this counter from the parent's list
4573 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004574 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004575 mutex_lock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004576 list_del_init(&child_counter->child_list);
Peter Zijlstrafccc7142009-05-23 18:28:56 +02004577 mutex_unlock(&parent_counter->child_mutex);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004578
4579 /*
4580 * Release the parent counter, if this was the last
4581 * reference to it.
4582 */
4583 fput(parent_counter->filp);
4584}
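
/*
 * Illustrative userspace sketch (not part of this file): what the fold-back
 * in sync_child_counter() above means for a monitoring task.  With
 * attr.inherit set, children created after the counter is opened get
 * inherited copies, and when a child exits its value is added back into the
 * parent counter, so a single read() observes the task plus its exited
 * children.  Error handling is omitted and the helper name is made up.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static uint64_t task_clock_with_children(void)
{
	struct perf_counter_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size    = sizeof(attr);
	attr.type    = PERF_TYPE_SOFTWARE;
	attr.config  = PERF_COUNT_SW_TASK_CLOCK;
	attr.inherit = 1;			/* follow children across fork() */

	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);

	if (fork() == 0)
		_exit(0);			/* child work would go here */
	wait(NULL);

	read(fd, &count, sizeof(count));	/* parent value + synced children */
	close(fd);
	return count;
}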
4585
Ingo Molnar9b51f662008-12-12 13:49:45 +01004586static void
Peter Zijlstrabbbee902009-05-29 14:25:58 +02004587__perf_counter_exit_task(struct perf_counter *child_counter,
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004588 struct perf_counter_context *child_ctx,
4589 struct task_struct *child)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004590{
4591 struct perf_counter *parent_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004592
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004593 update_counter_times(child_counter);
Peter Zijlstraaa9c67f2009-05-23 18:28:59 +02004594 perf_counter_remove_from_context(child_counter);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01004595
Ingo Molnar9b51f662008-12-12 13:49:45 +01004596 parent_counter = child_counter->parent;
4597 /*
4598	 * It can happen that the parent exits first, and has counters
4599 * that are still around due to the child reference. These
4600 * counters need to be zapped - but otherwise linger.
4601 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004602 if (parent_counter) {
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004603 sync_child_counter(child_counter, child);
Peter Zijlstraf1600952009-03-19 20:26:16 +01004604 free_counter(child_counter);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01004605 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004606}
4607
4608/*
Paul Mackerrasd859e292009-01-17 18:10:22 +11004609 * When a child task exits, feed back counter values to parent counters.
Ingo Molnar9b51f662008-12-12 13:49:45 +01004610 */
4611void perf_counter_exit_task(struct task_struct *child)
4612{
4613 struct perf_counter *child_counter, *tmp;
4614 struct perf_counter_context *child_ctx;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004615 unsigned long flags;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004616
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004617 if (likely(!child->perf_counter_ctxp)) {
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004618 perf_counter_task(child, NULL, 0);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004619 return;
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004620 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004621
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004622 local_irq_save(flags);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004623 /*
4624 * We can't reschedule here because interrupts are disabled,
4625	 * and either the child is current or it is a task that can't be
4626 * scheduled, so we are now safe from rescheduling changing
4627 * our context.
4628 */
4629 child_ctx = child->perf_counter_ctxp;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004630 __perf_counter_task_sched_out(child_ctx);
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004631
4632 /*
4633 * Take the context lock here so that if find_get_context is
4634 * reading child->perf_counter_ctxp, we wait until it has
4635 * incremented the context's refcount before we do put_ctx below.
4636 */
4637 spin_lock(&child_ctx->lock);
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004638 child->perf_counter_ctxp = NULL;
Peter Zijlstra71a851b2009-07-10 09:06:56 +02004639 /*
4640	 * If this context is a clone, unclone it so it can't get
4641 * swapped to another process while we're removing all
4642 * the counters from it.
4643 */
4644 unclone_ctx(child_ctx);
Peter Zijlstra9f498cc2009-07-23 14:46:33 +02004645 spin_unlock_irqrestore(&child_ctx->lock, flags);
4646
4647 /*
4648 * Report the task dead after unscheduling the counters so that we
4649 * won't get any samples after PERF_EVENT_EXIT. We can however still
4650 * get a few PERF_EVENT_READ events.
4651 */
Peter Zijlstra3a80b4a2009-08-07 19:49:01 +02004652 perf_counter_task(child, child_ctx, 0);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004653
Peter Zijlstra66fff222009-06-10 22:53:37 +02004654 /*
4655 * We can recurse on the same lock type through:
4656 *
4657 * __perf_counter_exit_task()
4658 * sync_child_counter()
4659 * fput(parent_counter->filp)
4660 * perf_release()
4661 * mutex_lock(&ctx->mutex)
4662 *
4663	 * But since it's the parent context it won't be the same instance.
4664 */
4665 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004666
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004667again:
Ingo Molnar9b51f662008-12-12 13:49:45 +01004668 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4669 list_entry)
Peter Zijlstra38b200d2009-06-23 20:13:11 +02004670 __perf_counter_exit_task(child_counter, child_ctx, child);
Peter Zijlstra8bc20952009-05-15 20:45:59 +02004671
4672 /*
4673 * If the last counter was a group counter, it will have appended all
4674 * its siblings to the list, but we obtained 'tmp' before that which
4675 * will still point to the list head terminating the iteration.
4676 */
4677 if (!list_empty(&child_ctx->counter_list))
4678 goto again;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004679
4680 mutex_unlock(&child_ctx->mutex);
4681
4682 put_ctx(child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004683}
4684
4685/*
Peter Zijlstrabbbee902009-05-29 14:25:58 +02004686 * free an unexposed, unused context created by inheritance in
4687 * perf_counter_init_task() below; used by fork() in case it fails.
4688 */
4689void perf_counter_free_task(struct task_struct *task)
4690{
4691 struct perf_counter_context *ctx = task->perf_counter_ctxp;
4692 struct perf_counter *counter, *tmp;
4693
4694 if (!ctx)
4695 return;
4696
4697 mutex_lock(&ctx->mutex);
4698again:
4699 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4700 struct perf_counter *parent = counter->parent;
4701
4702 if (WARN_ON_ONCE(!parent))
4703 continue;
4704
4705 mutex_lock(&parent->child_mutex);
4706 list_del_init(&counter->child_list);
4707 mutex_unlock(&parent->child_mutex);
4708
4709 fput(parent->filp);
4710
4711 list_del_counter(counter, ctx);
4712 free_counter(counter);
4713 }
4714
4715 if (!list_empty(&ctx->counter_list))
4716 goto again;
4717
4718 mutex_unlock(&ctx->mutex);
4719
4720 put_ctx(ctx);
4721}
4722
4723/*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004724 * Initialize the perf_counter context in task_struct
4725 */
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004726int perf_counter_init_task(struct task_struct *child)
Ingo Molnar9b51f662008-12-12 13:49:45 +01004727{
4728 struct perf_counter_context *child_ctx, *parent_ctx;
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004729 struct perf_counter_context *cloned_ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +11004730 struct perf_counter *counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004731 struct task_struct *parent = current;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004732 int inherited_all = 1;
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004733 int ret = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004734
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004735 child->perf_counter_ctxp = NULL;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004736
Peter Zijlstra082ff5a2009-05-23 18:29:00 +02004737 mutex_init(&child->perf_counter_mutex);
4738 INIT_LIST_HEAD(&child->perf_counter_list);
4739
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004740 if (likely(!parent->perf_counter_ctxp))
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004741 return 0;
4742
Ingo Molnar9b51f662008-12-12 13:49:45 +01004743 /*
4744 * This is executed from the parent task context, so inherit
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004745 * counters that have been marked for cloning.
4746 * First allocate and initialize a context for the child.
Ingo Molnar9b51f662008-12-12 13:49:45 +01004747 */
4748
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004749 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4750 if (!child_ctx)
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004751 return -ENOMEM;
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004752
4753 __perf_counter_init_context(child_ctx, child);
4754 child->perf_counter_ctxp = child_ctx;
Paul Mackerrasc93f7662009-05-28 22:18:17 +10004755 get_task_struct(child);
Paul Mackerrasa63eaf32009-05-22 14:17:31 +10004756
Ingo Molnar9b51f662008-12-12 13:49:45 +01004757 /*
Paul Mackerras25346b932009-06-01 17:48:12 +10004758 * If the parent's context is a clone, pin it so it won't get
4759 * swapped under us.
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004760 */
Paul Mackerras25346b932009-06-01 17:48:12 +10004761 parent_ctx = perf_pin_task_context(parent);
4762
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004763 /*
4764 * No need to check if parent_ctx != NULL here; since we saw
4765 * it non-NULL earlier, the only reason for it to become NULL
4766 * is if we exit, and since we're currently in the middle of
4767 * a fork we can't be exiting at the same time.
4768 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004769
4770 /*
Ingo Molnar9b51f662008-12-12 13:49:45 +01004771 * Lock the parent list. No need to lock the child - not PID
4772 * hashed yet and not running, so nobody can access it.
4773 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11004774 mutex_lock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004775
4776 /*
4777	 * We don't have to disable NMIs - we are only looking at
4778 * the list, not manipulating it:
4779 */
Peter Zijlstrad7b629a2009-05-20 12:21:19 +02004780 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4781 if (counter != counter->group_leader)
4782 continue;
4783
Peter Zijlstra0d486962009-06-02 19:22:16 +02004784 if (!counter->attr.inherit) {
Paul Mackerras564c2b22009-05-22 14:27:22 +10004785 inherited_all = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004786 continue;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004787 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01004788
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004789 ret = inherit_group(counter, parent, parent_ctx,
4790 child, child_ctx);
4791 if (ret) {
Paul Mackerras564c2b22009-05-22 14:27:22 +10004792 inherited_all = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004793 break;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004794 }
4795 }
4796
4797 if (inherited_all) {
4798 /*
4799 * Mark the child context as a clone of the parent
4800 * context, or of whatever the parent is a clone of.
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004801 * Note that if the parent is a clone, it could get
4802 * uncloned at any point, but that doesn't matter
4803 * because the list of counters and the generation
4804 * count can't have changed since we took the mutex.
Paul Mackerras564c2b22009-05-22 14:27:22 +10004805 */
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004806 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4807 if (cloned_ctx) {
4808 child_ctx->parent_ctx = cloned_ctx;
Paul Mackerras25346b932009-06-01 17:48:12 +10004809 child_ctx->parent_gen = parent_ctx->parent_gen;
Paul Mackerras564c2b22009-05-22 14:27:22 +10004810 } else {
4811 child_ctx->parent_ctx = parent_ctx;
4812 child_ctx->parent_gen = parent_ctx->generation;
4813 }
4814 get_ctx(child_ctx->parent_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01004815 }
4816
Paul Mackerrasd859e292009-01-17 18:10:22 +11004817 mutex_unlock(&parent_ctx->mutex);
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004818
Paul Mackerras25346b932009-06-01 17:48:12 +10004819 perf_unpin_context(parent_ctx);
Paul Mackerrasad3a37d2009-05-29 16:06:20 +10004820
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02004821 return ret;
Ingo Molnar9b51f662008-12-12 13:49:45 +01004822}
4823
Ingo Molnar04289bb2008-12-11 08:38:42 +01004824static void __cpuinit perf_counter_init_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004825{
Ingo Molnar04289bb2008-12-11 08:38:42 +01004826 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01004827
Ingo Molnar04289bb2008-12-11 08:38:42 +01004828 cpuctx = &per_cpu(perf_cpu_context, cpu);
4829 __perf_counter_init_context(&cpuctx->ctx, NULL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004830
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004831 spin_lock(&perf_resource_lock);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004832 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004833 spin_unlock(&perf_resource_lock);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004834
Paul Mackerras01d02872009-01-14 13:44:19 +11004835 hw_perf_counter_setup(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004836}
4837
4838#ifdef CONFIG_HOTPLUG_CPU
Ingo Molnar04289bb2008-12-11 08:38:42 +01004839static void __perf_counter_exit_cpu(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004840{
4841 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4842 struct perf_counter_context *ctx = &cpuctx->ctx;
4843 struct perf_counter *counter, *tmp;
4844
Ingo Molnar04289bb2008-12-11 08:38:42 +01004845 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4846 __perf_counter_remove_from_context(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004847}
Ingo Molnar04289bb2008-12-11 08:38:42 +01004848static void perf_counter_exit_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004849{
Paul Mackerrasd859e292009-01-17 18:10:22 +11004850 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4851 struct perf_counter_context *ctx = &cpuctx->ctx;
4852
4853 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01004854 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
Paul Mackerrasd859e292009-01-17 18:10:22 +11004855 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004856}
4857#else
Ingo Molnar04289bb2008-12-11 08:38:42 +01004858static inline void perf_counter_exit_cpu(int cpu) { }
Thomas Gleixner0793a612008-12-04 20:12:29 +01004859#endif
4860
4861static int __cpuinit
4862perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4863{
4864 unsigned int cpu = (long)hcpu;
4865
4866 switch (action) {
4867
4868 case CPU_UP_PREPARE:
4869 case CPU_UP_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004870 perf_counter_init_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004871 break;
4872
Ingo Molnar28402972009-08-13 10:13:22 +02004873 case CPU_ONLINE:
4874 case CPU_ONLINE_FROZEN:
4875 hw_perf_counter_setup_online(cpu);
4876 break;
4877
Thomas Gleixner0793a612008-12-04 20:12:29 +01004878 case CPU_DOWN_PREPARE:
4879 case CPU_DOWN_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01004880 perf_counter_exit_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004881 break;
4882
4883 default:
4884 break;
4885 }
4886
4887 return NOTIFY_OK;
4888}
4889
Paul Mackerrasf38b0822009-06-02 21:05:16 +10004890/*
4891 * This has to have a higher priority than migration_notifier in sched.c.
4892 */
Thomas Gleixner0793a612008-12-04 20:12:29 +01004893static struct notifier_block __cpuinitdata perf_cpu_nb = {
4894 .notifier_call = perf_cpu_notify,
Paul Mackerrasf38b0822009-06-02 21:05:16 +10004895 .priority = 20,
Thomas Gleixner0793a612008-12-04 20:12:29 +01004896};
4897
Ingo Molnar0d905bc2009-05-04 19:13:30 +02004898void __init perf_counter_init(void)
Thomas Gleixner0793a612008-12-04 20:12:29 +01004899{
4900 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4901 (void *)(long)smp_processor_id());
Ingo Molnar28402972009-08-13 10:13:22 +02004902 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4903 (void *)(long)smp_processor_id());
Thomas Gleixner0793a612008-12-04 20:12:29 +01004904 register_cpu_notifier(&perf_cpu_nb);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004905}
Thomas Gleixner0793a612008-12-04 20:12:29 +01004906
4907static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4908{
4909 return sprintf(buf, "%d\n", perf_reserved_percpu);
4910}
4911
4912static ssize_t
4913perf_set_reserve_percpu(struct sysdev_class *class,
4914 const char *buf,
4915 size_t count)
4916{
4917 struct perf_cpu_context *cpuctx;
4918 unsigned long val;
4919 int err, cpu, mpt;
4920
4921 err = strict_strtoul(buf, 10, &val);
4922 if (err)
4923 return err;
4924 if (val > perf_max_counters)
4925 return -EINVAL;
4926
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004927 spin_lock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004928 perf_reserved_percpu = val;
4929 for_each_online_cpu(cpu) {
4930 cpuctx = &per_cpu(perf_cpu_context, cpu);
4931 spin_lock_irq(&cpuctx->ctx.lock);
4932 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4933 perf_max_counters - perf_reserved_percpu);
4934 cpuctx->max_pertask = mpt;
4935 spin_unlock_irq(&cpuctx->ctx.lock);
4936 }
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004937 spin_unlock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004938
4939 return count;
4940}
4941
4942static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4943{
4944 return sprintf(buf, "%d\n", perf_overcommit);
4945}
4946
4947static ssize_t
4948perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4949{
4950 unsigned long val;
4951 int err;
4952
4953 err = strict_strtoul(buf, 10, &val);
4954 if (err)
4955 return err;
4956 if (val > 1)
4957 return -EINVAL;
4958
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004959 spin_lock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004960 perf_overcommit = val;
Ingo Molnar1dce8d92009-05-04 19:23:18 +02004961 spin_unlock(&perf_resource_lock);
Thomas Gleixner0793a612008-12-04 20:12:29 +01004962
4963 return count;
4964}
4965
4966static SYSDEV_CLASS_ATTR(
4967 reserve_percpu,
4968 0644,
4969 perf_show_reserve_percpu,
4970 perf_set_reserve_percpu
4971 );
4972
4973static SYSDEV_CLASS_ATTR(
4974 overcommit,
4975 0644,
4976 perf_show_overcommit,
4977 perf_set_overcommit
4978 );
4979
4980static struct attribute *perfclass_attrs[] = {
4981 &attr_reserve_percpu.attr,
4982 &attr_overcommit.attr,
4983 NULL
4984};
4985
4986static struct attribute_group perfclass_attr_group = {
4987 .attrs = perfclass_attrs,
4988 .name = "perf_counters",
4989};
4990
4991static int __init perf_counter_sysfs_init(void)
4992{
4993 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4994 &perfclass_attr_group);
4995}
4996device_initcall(perf_counter_sysfs_init);
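
/*
 * Illustrative sketch (not part of this file): the two class attributes
 * registered above are expected to show up under the cpu sysdev class,
 * i.e. (path assumed, not verified here)
 * /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit},
 * and take a plain decimal value.  The helper name is made up.
 */
#include <stdio.h>

static int set_reserve_percpu(int val)
{
	FILE *f = fopen("/sys/devices/system/cpu/perf_counters/reserve_percpu", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}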