/*
 * Performance counter core code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/rculist.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);

	list_add_rcu(&counter->event_entry, &ctx->event_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

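/*
 * Take an ACTIVE counter off the PMU: mark it INACTIVE, call ->disable(),
 * and update the per-CPU and per-context active counts.  The cpuctx
 * exclusive flag is dropped when an exclusive counter (or the last
 * hardware counter) goes away.
 */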
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Disable a counter.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		counter->state = PERF_COUNTER_STATE_OFF;

	spin_unlock_irq(&ctx->lock);
}

/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_disable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_disable(child);
	mutex_unlock(&counter->mutex);
}

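/*
 * Put a single counter on the PMU: mark it ACTIVE, publish the new state
 * with a write barrier, then call ->enable().  On failure the counter is
 * rolled back to INACTIVE and -EAGAIN is returned so the caller can undo
 * any partially scheduled group.
 */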
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;
	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	list_add_counter(counter, ctx);
	ctx->nr_counters++;
	counter->prev_state = PERF_COUNTER_STATE_OFF;

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx,
				       smp_processor_id());

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

 unlock:
	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Enable a counter.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF)
		counter->state = PERF_COUNTER_STATE_INACTIVE;
 out:
	spin_unlock_irq(&ctx->lock);
}

/*
 * Enable a counter and all its children.
 */
static void perf_counter_enable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_enable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_enable(child);
	mutex_unlock(&counter->mutex);
}

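/*
 * Common switch-out helper for task and per-CPU contexts: mark the
 * context inactive and schedule out every active group with the PMU
 * globally disabled.
 */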
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}

static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

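/*
 * Schedule in a whole group: the architecture code gets a chance to do
 * it atomically via hw_perf_group_sched_in(); otherwise the leader and
 * each sibling are put on one by one, and any partial group is undone
 * before returning -EAGAIN.
 */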
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, 1))
			group_sched_in(counter, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE)
			counter->state = PERF_COUNTER_STATE_ERROR;
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		}
	}
	hw_perf_restore(flags);
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ERROR)
			counter->state = PERF_COUNTER_STATE_OFF;
	}

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}

int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state > PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	const int rotate_percpu = 0;

	if (rotate_percpu)
		perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	if (rotate_percpu)
		rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	if (rotate_percpu)
		perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	unsigned long flags;

	curr_rq_lock_irq_save(&flags);
	counter->hw_ops->read(counter);
	curr_rq_unlock_irq_restore(&flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	}

	return atomic64_read(&counter->count);
}

/*
 * Cross CPU call to switch performance data pointers
 */
static void __perf_switch_irq_data(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task) {
		if (cpuctx->task_ctx != ctx)
			return;
		spin_lock(&ctx->lock);
	}

	/* Change the pointer NMI safe */
	atomic_long_set((atomic_long_t *)&counter->irqdata,
			(unsigned long) counter->usrdata);
	counter->usrdata = oldirqdata;

	if (ctx->task)
		spin_unlock(&ctx->lock);
}

static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;
	struct task_struct *task = ctx->task;

	if (!task) {
		smp_call_function_single(counter->cpu,
					 __perf_switch_irq_data,
					 counter, 1);
		return counter->usrdata;
	}

retry:
	spin_lock_irq(&ctx->lock);
	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
		counter->irqdata = counter->usrdata;
		counter->usrdata = oldirqdata;
		spin_unlock_irq(&ctx->lock);
		return oldirqdata;
	}
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_switch_irq_data, counter);
	/* Might have failed, because task was scheduled out */
	if (counter->irqdata == oldirqdata)
		goto retry;

	return counter->usrdata;
}

static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}

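/*
 * Look up the context a new counter should attach to: the per-CPU
 * context when cpu != -1 (CPU counters require CAP_SYS_ADMIN), otherwise
 * the context of the given task (or of current when pid == 0), subject
 * to a ptrace-style permission check.
 */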
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

static void free_counter(struct perf_counter *counter)
{
	if (counter->destroy)
		counter->destroy(counter);

	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&ctx->mutex);
	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);

	mutex_unlock(&counter->mutex);
	mutex_unlock(&ctx->mutex);

	free_counter(counter);
	put_context(ctx);

	return 0;
}

/*
 * Read the performance counter - simple non-blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 cntval;

	if (count != sizeof(cntval))
		return -EINVAL;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	mutex_lock(&counter->mutex);
	cntval = perf_counter_read(counter);
	mutex_unlock(&counter->mutex);

	return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}

static ssize_t
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
{
	if (!usrdata->len)
		return 0;

	count = min(count, (size_t)usrdata->len);
	if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
		return -EFAULT;

	/* Adjust the counters */
	usrdata->len -= count;
	if (!usrdata->len)
		usrdata->rd_idx = 0;
	else
		usrdata->rd_idx += count;

	return count;
}

static ssize_t
perf_read_irq_data(struct perf_counter *counter,
		   char __user *buf,
		   size_t count,
		   int nonblocking)
{
	struct perf_data *irqdata, *usrdata;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t res, res2;

	irqdata = counter->irqdata;
	usrdata = counter->usrdata;

	if (usrdata->len + irqdata->len >= count)
		goto read_pending;

	if (nonblocking)
		return -EAGAIN;

	spin_lock_irq(&counter->waitq.lock);
	__add_wait_queue(&counter->waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (usrdata->len + irqdata->len >= count)
			break;

		if (signal_pending(current))
			break;

		if (counter->state == PERF_COUNTER_STATE_ERROR)
			break;

		spin_unlock_irq(&counter->waitq.lock);
		schedule();
		spin_lock_irq(&counter->waitq.lock);
	}
	__remove_wait_queue(&counter->waitq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&counter->waitq.lock);

	if (usrdata->len + irqdata->len < count &&
	    counter->state != PERF_COUNTER_STATE_ERROR)
		return -ERESTARTSYS;
read_pending:
	mutex_lock(&counter->mutex);

	/* Drain pending data first: */
	res = perf_copy_usrdata(usrdata, buf, count);
	if (res < 0 || res == count)
		goto out;

	/* Switch irq buffer: */
	usrdata = perf_switch_irq_data(counter);
	res2 = perf_copy_usrdata(usrdata, buf + res, count - res);
	if (res2 < 0) {
		if (!res)
			res = -EFAULT;
	} else {
		res += res2;
	}
out:
	mutex_unlock(&counter->mutex);

	return res;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return perf_read_hw(counter, buf, count);

	case PERF_RECORD_IRQ:
	case PERF_RECORD_GROUP:
		return perf_read_irq_data(counter, buf, count,
					  file->f_flags & O_NONBLOCK);
	}
	return -EINVAL;
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &counter->waitq, wait);

	spin_lock_irqsave(&counter->waitq.lock, flags);
	if (counter->usrdata->len || counter->irqdata->len)
		events |= POLLIN;
	spin_unlock_irqrestore(&counter->waitq.lock, flags);

	return events;
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	int err = 0;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		perf_counter_enable_family(counter);
		break;
	case PERF_COUNTER_IOC_DISABLE:
		perf_counter_disable_family(counter);
		break;
	default:
		err = -ENOTTY;
	}
	return err;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
};

/*
 * Output
 */

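/*
 * Append one u64 to the counter's irq data buffer, counting an overrun
 * when the buffer is full.
 */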
static void perf_counter_store_irq(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

static void perf_counter_handle_group(struct perf_counter *counter)
{
	struct perf_counter *leader, *sub;

	leader = counter->group_leader;
	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
		if (sub != counter)
			sub->hw_ops->read(sub);
		perf_counter_store_irq(counter, sub->hw_event.config);
		perf_counter_store_irq(counter, atomic64_read(&sub->count));
	}
}

void perf_counter_output(struct perf_counter *counter,
			 int nmi, struct pt_regs *regs)
{
	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return;

	case PERF_RECORD_IRQ:
		perf_counter_store_irq(counter, instruction_pointer(regs));
		break;

	case PERF_RECORD_GROUP:
		perf_counter_handle_group(counter);
		break;
	}

	if (nmi) {
		counter->wakeup_pending = 1;
		set_perf_counter_pending();
	} else
		wake_up(&counter->waitq);
}

/*
 * Generic software counter infrastructure
 */

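/*
 * Fold the difference between hw.count and hw.prev_count into
 * counter->count and period_left; the cmpxchg loop keeps the update
 * consistent against concurrent events.
 */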
static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

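/*
 * Re-arm the software counter for the next period: the count is set to
 * -left so that it goes non-negative (and thus overflows) after another
 * 'left' events.
 */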
static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}

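/*
 * hrtimer callback used by the sampling software counters: read the
 * counter, emit an output record against the interrupted (or user)
 * registers, and re-arm the timer for the next period.
 */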
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	struct perf_counter *counter;
	struct pt_regs *regs;

	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->hw_ops->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs)
		perf_counter_output(counter, 0, regs);

	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));

	return HRTIMER_RESTART;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	perf_counter_output(counter, nmi, regs);
}

static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	if (perf_event_raw(&counter->hw_event))
		return 0;

	if (perf_event_type(&counter->hw_event) != type)
		return 0;

	if (perf_event_id(&counter->hw_event) != event)
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs);
}

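/*
 * Deliver a software event to every matching counter on a context's
 * event list; the list is walked under rcu_read_lock().
 */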
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001518static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001519 enum perf_event_types type, u32 event,
1520 u64 nr, int nmi, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001521{
1522 struct perf_counter *counter;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001523
Peter Zijlstra01ef09d2009-03-19 20:26:11 +01001524 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001525 return;
1526
Peter Zijlstra592903c2009-03-13 12:21:36 +01001527 rcu_read_lock();
1528 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001529 if (perf_swcounter_match(counter, type, event, regs))
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001530 perf_swcounter_add(counter, nr, nmi, regs);
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001531 }
Peter Zijlstra592903c2009-03-13 12:21:36 +01001532 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001533}
1534
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001535static void __perf_swcounter_event(enum perf_event_types type, u32 event,
1536 u64 nr, int nmi, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001537{
1538 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
1539
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001540 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
1541 if (cpuctx->task_ctx) {
1542 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
1543 nr, nmi, regs);
1544 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001545
1546 put_cpu_var(perf_cpu_context);
1547}
1548
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001549void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
1550{
1551 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
1552}
1553
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001554static void perf_swcounter_read(struct perf_counter *counter)
1555{
1556 perf_swcounter_update(counter);
1557}
1558
1559static int perf_swcounter_enable(struct perf_counter *counter)
1560{
1561 perf_swcounter_set_period(counter);
1562 return 0;
1563}
1564
1565static void perf_swcounter_disable(struct perf_counter *counter)
1566{
1567 perf_swcounter_update(counter);
1568}
1569
Peter Zijlstraac17dc82009-03-13 12:21:34 +01001570static const struct hw_perf_counter_ops perf_ops_generic = {
1571 .enable = perf_swcounter_enable,
1572 .disable = perf_swcounter_disable,
1573 .read = perf_swcounter_read,
1574};
1575
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001576/*
1577 * Software counter: cpu wall time clock
1578 */
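/*
 * (Note: when hw_event.irq_period is set, the clock counters below are
 * sampled by an hrtimer whose handler, perf_swcounter_hrtimer() defined
 * earlier in this file, fires roughly every irq_period nanoseconds.)
 */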
1579
Paul Mackerras9abf8a02009-01-09 16:26:43 +11001580static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1581{
1582 int cpu = raw_smp_processor_id();
1583 s64 prev;
1584 u64 now;
1585
1586 now = cpu_clock(cpu);
1587 prev = atomic64_read(&counter->hw.prev_count);
1588 atomic64_set(&counter->hw.prev_count, now);
1589 atomic64_add(now - prev, &counter->count);
1590}
1591
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001592static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1593{
1594 struct hw_perf_counter *hwc = &counter->hw;
1595 int cpu = raw_smp_processor_id();
1596
1597 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01001598 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1599 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001600 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001601 __hrtimer_start_range_ns(&hwc->hrtimer,
1602 ns_to_ktime(hwc->irq_period), 0,
1603 HRTIMER_MODE_REL, 0);
1604 }
1605
1606 return 0;
1607}
1608
Ingo Molnar5c92d122008-12-11 13:21:10 +01001609static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1610{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001611 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11001612 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01001613}
1614
1615static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1616{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11001617 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01001618}
1619
1620static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01001621 .enable = cpu_clock_perf_counter_enable,
1622 .disable = cpu_clock_perf_counter_disable,
1623 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01001624};
1625
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001626/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001627 * Software counter: task time clock
1628 */
1629
1630/*
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001631 * Called from within the scheduler:
1632 */
1633static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
Ingo Molnarbae43c92008-12-11 14:03:20 +01001634{
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001635 struct task_struct *curr = counter->task;
1636 u64 delta;
1637
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001638 delta = __task_delta_exec(curr, update);
1639
1640 return curr->se.sum_exec_runtime + delta;
1641}
1642
1643static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1644{
1645 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001646 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01001647
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001648 prev = atomic64_read(&counter->hw.prev_count);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001649
1650 atomic64_set(&counter->hw.prev_count, now);
1651
1652 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001653
1654 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01001655}
1656
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001657static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001658{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001659 struct hw_perf_counter *hwc = &counter->hw;
1660
1661 atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
Peter Zijlstra039fc912009-03-13 16:43:47 +01001662 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1663 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001664 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001665 __hrtimer_start_range_ns(&hwc->hrtimer,
1666 ns_to_ktime(hwc->irq_period), 0,
1667 HRTIMER_MODE_REL, 0);
1668 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001669
1670 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001671}
1672
1673static void task_clock_perf_counter_disable(struct perf_counter *counter)
1674{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001675 hrtimer_cancel(&counter->hw.hrtimer);
1676 task_clock_perf_counter_update(counter,
1677 task_clock_perf_counter_val(counter, 0));
1678}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001679
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001680static void task_clock_perf_counter_read(struct perf_counter *counter)
1681{
1682 task_clock_perf_counter_update(counter,
1683 task_clock_perf_counter_val(counter, 1));
Ingo Molnarbae43c92008-12-11 14:03:20 +01001684}
1685
1686static const struct hw_perf_counter_ops perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01001687 .enable = task_clock_perf_counter_enable,
1688 .disable = task_clock_perf_counter_disable,
1689 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01001690};
1691
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001692/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001693 * Software counter: cpu migrations
1694 */
1695
Paul Mackerras23a185c2009-02-09 22:42:47 +11001696static inline u64 get_cpu_migrations(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01001697{
Paul Mackerras23a185c2009-02-09 22:42:47 +11001698 struct task_struct *curr = counter->ctx->task;
1699
1700 if (curr)
1701 return curr->se.nr_migrations;
1702 return cpu_nr_migrations(smp_processor_id());
Ingo Molnar6c594c22008-12-14 12:34:15 +01001703}
1704
1705static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1706{
1707 u64 prev, now;
1708 s64 delta;
1709
1710 prev = atomic64_read(&counter->hw.prev_count);
Paul Mackerras23a185c2009-02-09 22:42:47 +11001711 now = get_cpu_migrations(counter);
Ingo Molnar6c594c22008-12-14 12:34:15 +01001712
1713 atomic64_set(&counter->hw.prev_count, now);
1714
1715 delta = now - prev;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001716
1717 atomic64_add(delta, &counter->count);
1718}
1719
1720static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1721{
1722 cpu_migrations_perf_counter_update(counter);
1723}
1724
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001725static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01001726{
Paul Mackerrasc07c99b2009-02-13 22:10:34 +11001727 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1728 atomic64_set(&counter->hw.prev_count,
1729 get_cpu_migrations(counter));
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001730 return 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001731}
1732
1733static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1734{
1735 cpu_migrations_perf_counter_update(counter);
1736}
1737
1738static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
Ingo Molnar76715812008-12-17 14:20:28 +01001739 .enable = cpu_migrations_perf_counter_enable,
1740 .disable = cpu_migrations_perf_counter_disable,
1741 .read = cpu_migrations_perf_counter_read,
Ingo Molnar6c594c22008-12-14 12:34:15 +01001742};
1743
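/*
 * Software counter: tracepoint events
 *
 * (Sketch of the flow: with CONFIG_EVENT_PROFILE enabled, ftrace's
 * profiling hooks are expected to call perf_tpcounter_event() with the
 * tracepoint's event id, which feeds into the generic software counter
 * path above.)
 */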
Peter Zijlstrae077df42009-03-19 20:26:17 +01001744#ifdef CONFIG_EVENT_PROFILE
1745void perf_tpcounter_event(int event_id)
1746{
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001747 struct pt_regs *regs = get_irq_regs();
1748
1749 if (!regs)
1750 regs = task_pt_regs(current);
1751
1752 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
Peter Zijlstrae077df42009-03-19 20:26:17 +01001753}
1754
1755extern int ftrace_profile_enable(int);
1756extern void ftrace_profile_disable(int);
1757
1758static void tp_perf_counter_destroy(struct perf_counter *counter)
1759{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001760 ftrace_profile_disable(perf_event_id(&counter->hw_event));
Peter Zijlstrae077df42009-03-19 20:26:17 +01001761}
1762
1763static const struct hw_perf_counter_ops *
1764tp_perf_counter_init(struct perf_counter *counter)
1765{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001766 int event_id = perf_event_id(&counter->hw_event);
Peter Zijlstrae077df42009-03-19 20:26:17 +01001767 int ret;
1768
1769 ret = ftrace_profile_enable(event_id);
1770 if (ret)
1771 return NULL;
1772
1773 counter->destroy = tp_perf_counter_destroy;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001774 counter->hw.irq_period = counter->hw_event.irq_period;
Peter Zijlstrae077df42009-03-19 20:26:17 +01001775
1776 return &perf_ops_generic;
1777}
1778#else
1779static const struct hw_perf_counter_ops *
1780tp_perf_counter_init(struct perf_counter *counter)
1781{
1782 return NULL;
1783}
1784#endif
1785
Ingo Molnar5c92d122008-12-11 13:21:10 +01001786static const struct hw_perf_counter_ops *
1787sw_perf_counter_init(struct perf_counter *counter)
1788{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001789 struct perf_counter_hw_event *hw_event = &counter->hw_event;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001790 const struct hw_perf_counter_ops *hw_ops = NULL;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001791 struct hw_perf_counter *hwc = &counter->hw;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001792
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001793 /*
1794 * Software counters (currently) can't in general distinguish
1795 * between user, kernel and hypervisor events.
1796 * However, context switches and cpu migrations are considered
1797 * to be kernel events, and page faults are never hypervisor
1798 * events.
1799 */
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001800 switch (perf_event_id(&counter->hw_event)) {
Ingo Molnar5c92d122008-12-11 13:21:10 +01001801 case PERF_COUNT_CPU_CLOCK:
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001802 hw_ops = &perf_ops_cpu_clock;
1803
1804 if (hw_event->irq_period && hw_event->irq_period < 10000)
1805 hw_event->irq_period = 10000;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001806 break;
Ingo Molnarbae43c92008-12-11 14:03:20 +01001807 case PERF_COUNT_TASK_CLOCK:
Paul Mackerras23a185c2009-02-09 22:42:47 +11001808 /*
1809 * If the user instantiates this as a per-cpu counter,
1810 * use the cpu_clock counter instead.
1811 */
1812 if (counter->ctx->task)
1813 hw_ops = &perf_ops_task_clock;
1814 else
1815 hw_ops = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001816
1817 if (hw_event->irq_period && hw_event->irq_period < 10000)
1818 hw_event->irq_period = 10000;
Ingo Molnarbae43c92008-12-11 14:03:20 +01001819 break;
Ingo Molnare06c61a2008-12-14 14:44:31 +01001820 case PERF_COUNT_PAGE_FAULTS:
Peter Zijlstraac17dc82009-03-13 12:21:34 +01001821 case PERF_COUNT_PAGE_FAULTS_MIN:
1822 case PERF_COUNT_PAGE_FAULTS_MAJ:
Ingo Molnar5d6a27d2008-12-14 12:28:33 +01001823 case PERF_COUNT_CONTEXT_SWITCHES:
Peter Zijlstra4a0deca2009-03-19 20:26:12 +01001824 hw_ops = &perf_ops_generic;
Ingo Molnar5d6a27d2008-12-14 12:28:33 +01001825 break;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001826 case PERF_COUNT_CPU_MIGRATIONS:
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001827 if (!counter->hw_event.exclude_kernel)
1828 hw_ops = &perf_ops_cpu_migrations;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001829 break;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001830 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001831
1832 if (hw_ops)
1833 hwc->irq_period = hw_event->irq_period;
1834
Ingo Molnar5c92d122008-12-11 13:21:10 +01001835 return hw_ops;
1836}
1837
Thomas Gleixner0793a612008-12-04 20:12:29 +01001838/*
1839 * Allocate and initialize a counter structure
1840 */
1841static struct perf_counter *
Ingo Molnar04289bb2008-12-11 08:38:42 +01001842perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1843 int cpu,
Paul Mackerras23a185c2009-02-09 22:42:47 +11001844 struct perf_counter_context *ctx,
Ingo Molnar9b51f662008-12-12 13:49:45 +01001845 struct perf_counter *group_leader,
1846 gfp_t gfpflags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001847{
Ingo Molnar5c92d122008-12-11 13:21:10 +01001848 const struct hw_perf_counter_ops *hw_ops;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001849 struct perf_counter *counter;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001850
Ingo Molnar9b51f662008-12-12 13:49:45 +01001851 counter = kzalloc(sizeof(*counter), gfpflags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001852 if (!counter)
1853 return NULL;
1854
Ingo Molnar04289bb2008-12-11 08:38:42 +01001855 /*
1856 * Single counters are their own group leaders, with an
1857 * empty sibling list:
1858 */
1859 if (!group_leader)
1860 group_leader = counter;
1861
Thomas Gleixner0793a612008-12-04 20:12:29 +01001862 mutex_init(&counter->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001863 INIT_LIST_HEAD(&counter->list_entry);
Peter Zijlstra592903c2009-03-13 12:21:36 +01001864 INIT_LIST_HEAD(&counter->event_entry);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001865 INIT_LIST_HEAD(&counter->sibling_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001866 init_waitqueue_head(&counter->waitq);
1867
Paul Mackerrasd859e292009-01-17 18:10:22 +11001868 INIT_LIST_HEAD(&counter->child_list);
1869
Ingo Molnar9f66a382008-12-10 12:33:23 +01001870 counter->irqdata = &counter->data[0];
1871 counter->usrdata = &counter->data[1];
1872 counter->cpu = cpu;
1873 counter->hw_event = *hw_event;
1874 counter->wakeup_pending = 0;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001875 counter->group_leader = group_leader;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001876 counter->hw_ops = NULL;
Paul Mackerras23a185c2009-02-09 22:42:47 +11001877 counter->ctx = ctx;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001878
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001879 counter->state = PERF_COUNTER_STATE_INACTIVE;
Ingo Molnara86ed502008-12-17 00:43:10 +01001880 if (hw_event->disabled)
1881 counter->state = PERF_COUNTER_STATE_OFF;
1882
Ingo Molnar5c92d122008-12-11 13:21:10 +01001883 hw_ops = NULL;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001884
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001885 if (perf_event_raw(hw_event)) {
Ingo Molnar5c92d122008-12-11 13:21:10 +01001886 hw_ops = hw_perf_counter_init(counter);
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001887 goto done;
1888 }
1889
1890 switch (perf_event_type(hw_event)) {
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001891 case PERF_TYPE_HARDWARE:
1892 hw_ops = hw_perf_counter_init(counter);
1893 break;
1894
1895 case PERF_TYPE_SOFTWARE:
1896 hw_ops = sw_perf_counter_init(counter);
1897 break;
1898
1899 case PERF_TYPE_TRACEPOINT:
1900 hw_ops = tp_perf_counter_init(counter);
1901 break;
1902 }
Ingo Molnar5c92d122008-12-11 13:21:10 +01001903
Ingo Molnar621a01e2008-12-11 12:46:46 +01001904 if (!hw_ops) {
1905 kfree(counter);
1906 return NULL;
1907 }
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001908done:
Ingo Molnar621a01e2008-12-11 12:46:46 +01001909 counter->hw_ops = hw_ops;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001910
1911 return counter;
1912}
1913
1914/**
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001915 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
Ingo Molnar9f66a382008-12-10 12:33:23 +01001916 *
1917 * @hw_event_uptr: event type attributes for monitoring/sampling
Thomas Gleixner0793a612008-12-04 20:12:29 +01001918 * @pid: target pid
Ingo Molnar9f66a382008-12-10 12:33:23 +01001919 * @cpu: target cpu
 1920 * @group_fd: group leader counter fd
 * @flags: reserved for future use, must be 0
Thomas Gleixner0793a612008-12-04 20:12:29 +01001921 */
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001922SYSCALL_DEFINE5(perf_counter_open,
Paul Mackerrasf3dfd262009-02-26 22:43:46 +11001923 const struct perf_counter_hw_event __user *, hw_event_uptr,
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001924 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001925{
Ingo Molnar04289bb2008-12-11 08:38:42 +01001926 struct perf_counter *counter, *group_leader;
Ingo Molnar9f66a382008-12-10 12:33:23 +01001927 struct perf_counter_hw_event hw_event;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001928 struct perf_counter_context *ctx;
Ingo Molnar9b51f662008-12-12 13:49:45 +01001929 struct file *counter_file = NULL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001930 struct file *group_file = NULL;
1931 int fput_needed = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01001932 int fput_needed2 = 0;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001933 int ret;
1934
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001935 /* for future expandability... */
1936 if (flags)
1937 return -EINVAL;
1938
Ingo Molnar9f66a382008-12-10 12:33:23 +01001939 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
Thomas Gleixnereab656a2008-12-08 19:26:59 +01001940 return -EFAULT;
1941
Ingo Molnar04289bb2008-12-11 08:38:42 +01001942 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01001943 * Get the target context (task or percpu):
1944 */
1945 ctx = find_get_context(pid, cpu);
1946 if (IS_ERR(ctx))
1947 return PTR_ERR(ctx);
1948
1949 /*
1950 * Look up the group leader (we will attach this counter to it):
Ingo Molnar04289bb2008-12-11 08:38:42 +01001951 */
1952 group_leader = NULL;
1953 if (group_fd != -1) {
1954 ret = -EINVAL;
1955 group_file = fget_light(group_fd, &fput_needed);
1956 if (!group_file)
Ingo Molnarccff2862008-12-11 11:26:29 +01001957 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001958 if (group_file->f_op != &perf_fops)
Ingo Molnarccff2862008-12-11 11:26:29 +01001959 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001960
1961 group_leader = group_file->private_data;
1962 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01001963 * Do not allow a recursive hierarchy (this new sibling
1964 * becoming part of another group-sibling):
Ingo Molnar04289bb2008-12-11 08:38:42 +01001965 */
Ingo Molnarccff2862008-12-11 11:26:29 +01001966 if (group_leader->group_leader != group_leader)
1967 goto err_put_context;
1968 /*
1969 * Do not allow to attach to a group in a different
1970 * task or CPU context:
1971 */
1972 if (group_leader->ctx != ctx)
1973 goto err_put_context;
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11001974 /*
1975 * Only a group leader can be exclusive or pinned
1976 */
1977 if (hw_event.exclusive || hw_event.pinned)
1978 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001979 }
1980
Ingo Molnar5c92d122008-12-11 13:21:10 +01001981 ret = -EINVAL;
Paul Mackerras23a185c2009-02-09 22:42:47 +11001982 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
1983 GFP_KERNEL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001984 if (!counter)
1985 goto err_put_context;
1986
Thomas Gleixner0793a612008-12-04 20:12:29 +01001987 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
1988 if (ret < 0)
Ingo Molnar9b51f662008-12-12 13:49:45 +01001989 goto err_free_put_context;
1990
1991 counter_file = fget_light(ret, &fput_needed2);
1992 if (!counter_file)
1993 goto err_free_put_context;
1994
1995 counter->filp = counter_file;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001996 mutex_lock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01001997 perf_install_in_context(ctx, counter, cpu);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001998 mutex_unlock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01001999
2000 fput_light(counter_file, fput_needed2);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002001
Ingo Molnar04289bb2008-12-11 08:38:42 +01002002out_fput:
2003 fput_light(group_file, fput_needed);
2004
Thomas Gleixner0793a612008-12-04 20:12:29 +01002005 return ret;
2006
Ingo Molnar9b51f662008-12-12 13:49:45 +01002007err_free_put_context:
Thomas Gleixner0793a612008-12-04 20:12:29 +01002008 kfree(counter);
2009
2010err_put_context:
2011 put_context(ctx);
2012
Ingo Molnar04289bb2008-12-11 08:38:42 +01002013 goto out_fput;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002014}
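/*
 * (Illustrative userspace usage, not part of this file: with no libc
 * wrapper yet, the syscall would typically be invoked via syscall(2),
 * roughly
 *
 *	struct perf_counter_hw_event hw_event = { ... };
 *	int fd = syscall(__NR_perf_counter_open, &hw_event, pid, cpu, -1, 0);
 *
 * and the resulting fd can be read() for the counter value, or passed as
 * group_fd when opening further counters into the same group.)
 */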
2015
Ingo Molnar9b51f662008-12-12 13:49:45 +01002016/*
2017 * Initialize the perf_counter context in a task_struct:
2018 */
2019static void
2020__perf_counter_init_context(struct perf_counter_context *ctx,
2021 struct task_struct *task)
2022{
2023 memset(ctx, 0, sizeof(*ctx));
2024 spin_lock_init(&ctx->lock);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002025 mutex_init(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002026 INIT_LIST_HEAD(&ctx->counter_list);
Peter Zijlstra592903c2009-03-13 12:21:36 +01002027 INIT_LIST_HEAD(&ctx->event_list);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002028 ctx->task = task;
2029}
2030
2031/*
2032 * inherit a counter from parent task to child task:
2033 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002034static struct perf_counter *
Ingo Molnar9b51f662008-12-12 13:49:45 +01002035inherit_counter(struct perf_counter *parent_counter,
2036 struct task_struct *parent,
2037 struct perf_counter_context *parent_ctx,
2038 struct task_struct *child,
Paul Mackerrasd859e292009-01-17 18:10:22 +11002039 struct perf_counter *group_leader,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002040 struct perf_counter_context *child_ctx)
2041{
2042 struct perf_counter *child_counter;
2043
Paul Mackerrasd859e292009-01-17 18:10:22 +11002044 /*
2045 * Instead of creating recursive hierarchies of counters,
2046 * we link inherited counters back to the original parent,
2047 * which has a filp for sure, which we use as the reference
2048 * count:
2049 */
2050 if (parent_counter->parent)
2051 parent_counter = parent_counter->parent;
2052
Ingo Molnar9b51f662008-12-12 13:49:45 +01002053 child_counter = perf_counter_alloc(&parent_counter->hw_event,
Paul Mackerras23a185c2009-02-09 22:42:47 +11002054 parent_counter->cpu, child_ctx,
2055 group_leader, GFP_KERNEL);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002056 if (!child_counter)
Paul Mackerrasd859e292009-01-17 18:10:22 +11002057 return NULL;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002058
2059 /*
2060 * Link it up in the child's context:
2061 */
Ingo Molnar9b51f662008-12-12 13:49:45 +01002062 child_counter->task = child;
2063 list_add_counter(child_counter, child_ctx);
2064 child_ctx->nr_counters++;
2065
2066 child_counter->parent = parent_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002067 /*
2068 * inherit into child's child as well:
2069 */
2070 child_counter->hw_event.inherit = 1;
2071
2072 /*
2073 * Get a reference to the parent filp - we will fput it
2074 * when the child counter exits. This is safe to do because
2075 * we are in the parent and we know that the filp still
2076 * exists and has a nonzero count:
2077 */
2078 atomic_long_inc(&parent_counter->filp->f_count);
2079
Paul Mackerrasd859e292009-01-17 18:10:22 +11002080 /*
2081 * Link this into the parent counter's child list
2082 */
2083 mutex_lock(&parent_counter->mutex);
2084 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2085
2086 /*
2087 * Make the child state follow the state of the parent counter,
2088 * not its hw_event.disabled bit. We hold the parent's mutex,
2089 * so we won't race with perf_counter_{en,dis}able_family.
2090 */
2091 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2092 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2093 else
2094 child_counter->state = PERF_COUNTER_STATE_OFF;
2095
2096 mutex_unlock(&parent_counter->mutex);
2097
2098 return child_counter;
2099}
2100
2101static int inherit_group(struct perf_counter *parent_counter,
2102 struct task_struct *parent,
2103 struct perf_counter_context *parent_ctx,
2104 struct task_struct *child,
2105 struct perf_counter_context *child_ctx)
2106{
2107 struct perf_counter *leader;
2108 struct perf_counter *sub;
2109
2110 leader = inherit_counter(parent_counter, parent, parent_ctx,
2111 child, NULL, child_ctx);
2112 if (!leader)
2113 return -ENOMEM;
2114 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2115 if (!inherit_counter(sub, parent, parent_ctx,
2116 child, leader, child_ctx))
2117 return -ENOMEM;
2118 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01002119 return 0;
2120}
2121
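/*
 * (Counter inheritance in a nutshell: perf_counter_init_task() below
 * clones counters marked hw_event.inherit into the child at fork time;
 * when the child exits, sync_child_counter() folds the child's count
 * back into the parent counter and drops the parent's filp reference.)
 */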
Paul Mackerrasd859e292009-01-17 18:10:22 +11002122static void sync_child_counter(struct perf_counter *child_counter,
2123 struct perf_counter *parent_counter)
2124{
2125 u64 parent_val, child_val;
2126
2127 parent_val = atomic64_read(&parent_counter->count);
2128 child_val = atomic64_read(&child_counter->count);
2129
2130 /*
2131 * Add back the child's count to the parent's count:
2132 */
2133 atomic64_add(child_val, &parent_counter->count);
2134
2135 /*
2136 * Remove this counter from the parent's list
2137 */
2138 mutex_lock(&parent_counter->mutex);
2139 list_del_init(&child_counter->child_list);
2140 mutex_unlock(&parent_counter->mutex);
2141
2142 /*
2143 * Release the parent counter, if this was the last
2144 * reference to it.
2145 */
2146 fput(parent_counter->filp);
2147}
2148
Ingo Molnar9b51f662008-12-12 13:49:45 +01002149static void
2150__perf_counter_exit_task(struct task_struct *child,
2151 struct perf_counter *child_counter,
2152 struct perf_counter_context *child_ctx)
2153{
2154 struct perf_counter *parent_counter;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002155 struct perf_counter *sub, *tmp;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002156
2157 /*
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002158 * If we do not self-reap then we have to wait for the
2159 * child task to unschedule (it will happen for sure),
2160 * so that its counter is at its final count. (This
2161 * condition triggers rarely - child tasks usually get
2162 * off their CPU before the parent has a chance to
2163 * get this far into the reaping action)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002164 */
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002165 if (child != current) {
2166 wait_task_inactive(child, 0);
2167 list_del_init(&child_counter->list_entry);
2168 } else {
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002169 struct perf_cpu_context *cpuctx;
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002170 unsigned long flags;
2171 u64 perf_flags;
2172
2173 /*
2174 * Disable and unlink this counter.
2175 *
2176 * Be careful about zapping the list - IRQ/NMI context
2177 * could still be processing it:
2178 */
2179 curr_rq_lock_irq_save(&flags);
2180 perf_flags = hw_perf_save_disable();
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002181
2182 cpuctx = &__get_cpu_var(perf_cpu_context);
2183
Paul Mackerrasd859e292009-01-17 18:10:22 +11002184 group_sched_out(child_counter, cpuctx, child_ctx);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002185
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002186 list_del_init(&child_counter->list_entry);
2187
2188 child_ctx->nr_counters--;
2189
2190 hw_perf_restore(perf_flags);
2191 curr_rq_unlock_irq_restore(&flags);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002192 }
2193
Ingo Molnar9b51f662008-12-12 13:49:45 +01002194 parent_counter = child_counter->parent;
2195 /*
2196 * It can happen that parent exits first, and has counters
2197 * that are still around due to the child reference. These
2198 * counters need to be zapped - but otherwise linger.
2199 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002200 if (parent_counter) {
2201 sync_child_counter(child_counter, parent_counter);
2202 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2203 list_entry) {
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002204 if (sub->parent) {
Paul Mackerrasd859e292009-01-17 18:10:22 +11002205 sync_child_counter(sub, sub->parent);
Peter Zijlstraf1600952009-03-19 20:26:16 +01002206 free_counter(sub);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002207 }
Paul Mackerrasd859e292009-01-17 18:10:22 +11002208 }
Peter Zijlstraf1600952009-03-19 20:26:16 +01002209 free_counter(child_counter);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002210 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01002211}
2212
2213/*
Paul Mackerrasd859e292009-01-17 18:10:22 +11002214 * When a child task exits, feed back counter values to parent counters.
Ingo Molnar9b51f662008-12-12 13:49:45 +01002215 *
Paul Mackerrasd859e292009-01-17 18:10:22 +11002216 * Note: we may be running in child context, but the PID is not hashed
Ingo Molnar9b51f662008-12-12 13:49:45 +01002217 * anymore so new counters will not be added.
2218 */
2219void perf_counter_exit_task(struct task_struct *child)
2220{
2221 struct perf_counter *child_counter, *tmp;
2222 struct perf_counter_context *child_ctx;
2223
2224 child_ctx = &child->perf_counter_ctx;
2225
2226 if (likely(!child_ctx->nr_counters))
2227 return;
2228
2229 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2230 list_entry)
2231 __perf_counter_exit_task(child, child_counter, child_ctx);
2232}
2233
2234/*
2235 * Initialize the perf_counter context in task_struct
2236 */
2237void perf_counter_init_task(struct task_struct *child)
2238{
2239 struct perf_counter_context *child_ctx, *parent_ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002240 struct perf_counter *counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002241 struct task_struct *parent = current;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002242
2243 child_ctx = &child->perf_counter_ctx;
2244 parent_ctx = &parent->perf_counter_ctx;
2245
2246 __perf_counter_init_context(child_ctx, child);
2247
2248 /*
2249 * This is executed from the parent task context, so inherit
2250 * counters that have been marked for cloning:
2251 */
2252
2253 if (likely(!parent_ctx->nr_counters))
2254 return;
2255
2256 /*
2257 * Lock the parent list. No need to lock the child - not PID
2258 * hashed yet and not running, so nobody can access it.
2259 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002260 mutex_lock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002261
2262 /*
 2263 * We don't have to disable NMIs - we are only looking at
2264 * the list, not manipulating it:
2265 */
2266 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
Paul Mackerrasd859e292009-01-17 18:10:22 +11002267 if (!counter->hw_event.inherit)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002268 continue;
2269
Paul Mackerrasd859e292009-01-17 18:10:22 +11002270 if (inherit_group(counter, parent,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002271 parent_ctx, child, child_ctx))
2272 break;
2273 }
2274
Paul Mackerrasd859e292009-01-17 18:10:22 +11002275 mutex_unlock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002276}
2277
Ingo Molnar04289bb2008-12-11 08:38:42 +01002278static void __cpuinit perf_counter_init_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002279{
Ingo Molnar04289bb2008-12-11 08:38:42 +01002280 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002281
Ingo Molnar04289bb2008-12-11 08:38:42 +01002282 cpuctx = &per_cpu(perf_cpu_context, cpu);
2283 __perf_counter_init_context(&cpuctx->ctx, NULL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002284
2285 mutex_lock(&perf_resource_mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002286 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002287 mutex_unlock(&perf_resource_mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002288
Paul Mackerras01d02872009-01-14 13:44:19 +11002289 hw_perf_counter_setup(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002290}
2291
2292#ifdef CONFIG_HOTPLUG_CPU
Ingo Molnar04289bb2008-12-11 08:38:42 +01002293static void __perf_counter_exit_cpu(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002294{
2295 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2296 struct perf_counter_context *ctx = &cpuctx->ctx;
2297 struct perf_counter *counter, *tmp;
2298
Ingo Molnar04289bb2008-12-11 08:38:42 +01002299 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2300 __perf_counter_remove_from_context(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002301}
Ingo Molnar04289bb2008-12-11 08:38:42 +01002302static void perf_counter_exit_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002303{
Paul Mackerrasd859e292009-01-17 18:10:22 +11002304 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2305 struct perf_counter_context *ctx = &cpuctx->ctx;
2306
2307 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002308 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002309 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002310}
2311#else
Ingo Molnar04289bb2008-12-11 08:38:42 +01002312static inline void perf_counter_exit_cpu(int cpu) { }
Thomas Gleixner0793a612008-12-04 20:12:29 +01002313#endif
2314
2315static int __cpuinit
2316perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2317{
2318 unsigned int cpu = (long)hcpu;
2319
2320 switch (action) {
2321
2322 case CPU_UP_PREPARE:
2323 case CPU_UP_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01002324 perf_counter_init_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002325 break;
2326
2327 case CPU_DOWN_PREPARE:
2328 case CPU_DOWN_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01002329 perf_counter_exit_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002330 break;
2331
2332 default:
2333 break;
2334 }
2335
2336 return NOTIFY_OK;
2337}
2338
2339static struct notifier_block __cpuinitdata perf_cpu_nb = {
2340 .notifier_call = perf_cpu_notify,
2341};
2342
2343static int __init perf_counter_init(void)
2344{
2345 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2346 (void *)(long)smp_processor_id());
2347 register_cpu_notifier(&perf_cpu_nb);
2348
2349 return 0;
2350}
2351early_initcall(perf_counter_init);
2352
2353static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2354{
2355 return sprintf(buf, "%d\n", perf_reserved_percpu);
2356}
2357
2358static ssize_t
2359perf_set_reserve_percpu(struct sysdev_class *class,
2360 const char *buf,
2361 size_t count)
2362{
2363 struct perf_cpu_context *cpuctx;
2364 unsigned long val;
2365 int err, cpu, mpt;
2366
2367 err = strict_strtoul(buf, 10, &val);
2368 if (err)
2369 return err;
2370 if (val > perf_max_counters)
2371 return -EINVAL;
2372
2373 mutex_lock(&perf_resource_mutex);
2374 perf_reserved_percpu = val;
2375 for_each_online_cpu(cpu) {
2376 cpuctx = &per_cpu(perf_cpu_context, cpu);
2377 spin_lock_irq(&cpuctx->ctx.lock);
2378 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2379 perf_max_counters - perf_reserved_percpu);
2380 cpuctx->max_pertask = mpt;
2381 spin_unlock_irq(&cpuctx->ctx.lock);
2382 }
2383 mutex_unlock(&perf_resource_mutex);
2384
2385 return count;
2386}
2387
2388static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2389{
2390 return sprintf(buf, "%d\n", perf_overcommit);
2391}
2392
2393static ssize_t
2394perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2395{
2396 unsigned long val;
2397 int err;
2398
2399 err = strict_strtoul(buf, 10, &val);
2400 if (err)
2401 return err;
2402 if (val > 1)
2403 return -EINVAL;
2404
2405 mutex_lock(&perf_resource_mutex);
2406 perf_overcommit = val;
2407 mutex_unlock(&perf_resource_mutex);
2408
2409 return count;
2410}
2411
2412static SYSDEV_CLASS_ATTR(
2413 reserve_percpu,
2414 0644,
2415 perf_show_reserve_percpu,
2416 perf_set_reserve_percpu
2417 );
2418
2419static SYSDEV_CLASS_ATTR(
2420 overcommit,
2421 0644,
2422 perf_show_overcommit,
2423 perf_set_overcommit
2424 );
2425
2426static struct attribute *perfclass_attrs[] = {
2427 &attr_reserve_percpu.attr,
2428 &attr_overcommit.attr,
2429 NULL
2430};
2431
2432static struct attribute_group perfclass_attr_group = {
2433 .attrs = perfclass_attrs,
2434 .name = "perf_counters",
2435};
2436
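/*
 * (Assuming the standard cpu sysdev class location, the two knobs above
 * should appear as
 *
 *	/sys/devices/system/cpu/perf_counters/reserve_percpu
 *	/sys/devices/system/cpu/perf_counters/overcommit
 *
 * writable by root, e.g. `echo 0 > .../overcommit`.)
 */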
2437static int __init perf_counter_sysfs_init(void)
2438{
2439 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2440 &perfclass_attr_group);
2441}
2442device_initcall(perf_counter_sysfs_init);