/*
 * Performance counter core code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/rculist.h>
#include <linux/hardirq.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);

	list_add_rcu(&counter->event_entry, &ctx->event_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

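/*
 * Take a single counter off the PMU: mark it inactive, disable it via
 * its hw_ops and update the context's active counter accounting.
 */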
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

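/*
 * Take a group leader and all of its siblings off the PMU, and drop
 * the cpuctx->exclusive reservation if this group held it.
 */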
static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Disable a counter.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		counter->state = PERF_COUNTER_STATE_OFF;

	spin_unlock_irq(&ctx->lock);
}

/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_disable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_disable(child);
	mutex_unlock(&counter->mutex);
}

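/*
 * Put a single counter on the PMU. Returns -EAGAIN if the hardware
 * refuses it, so the caller can back out the rest of the group.
 */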
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;
	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	list_add_counter(counter, ctx);
	ctx->nr_counters++;
	counter->prev_state = PERF_COUNTER_STATE_OFF;

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added,
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx,
				       smp_processor_id());

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

 unlock:
	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}

/*
 * Enable a counter.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF)
		counter->state = PERF_COUNTER_STATE_INACTIVE;
 out:
	spin_unlock_irq(&ctx->lock);
}

/*
 * Enable a counter and all its children.
 */
static void perf_counter_enable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_enable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_enable(child);
	mutex_unlock(&counter->mutex);
}

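/*
 * Deschedule all counters of a context: mark the context inactive and,
 * with the PMU globally disabled, schedule out every active group.
 */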
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}

static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

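/*
 * Schedule in a counter group as a single unit: the leader first, then
 * all siblings; a partially scheduled group is undone on failure.
 */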
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

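/*
 * Schedule in a context's counters: pinned groups are given first
 * chance, then the remaining groups for as long as there is room.
 */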
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, 1))
			group_sched_in(counter, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE)
			counter->state = PERF_COUNTER_STATE_ERROR;
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		}
	}
	hw_perf_restore(flags);
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

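/*
 * Schedule out the current task's counters and mark them all OFF
 * (counters already in error state keep that state).
 */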
int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ERROR)
			counter->state = PERF_COUNTER_STATE_OFF;
	}

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}

int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state > PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	const int rotate_percpu = 0;

	if (rotate_percpu)
		perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	if (rotate_percpu)
		rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	if (rotate_percpu)
		perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	unsigned long flags;

	curr_rq_lock_irq_save(&flags);
	counter->hw_ops->read(counter);
	curr_rq_unlock_irq_restore(&flags);
}

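/*
 * Read the current value of a counter; if it is active, first force an
 * update with a cross CPU call to the CPU it is running on.
 */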
static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	}

	return atomic64_read(&counter->count);
}

/*
 * Cross CPU call to switch performance data pointers
 */
static void __perf_switch_irq_data(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task) {
		if (cpuctx->task_ctx != ctx)
			return;
		spin_lock(&ctx->lock);
	}

	/* Change the pointer NMI safe */
	atomic_long_set((atomic_long_t *)&counter->irqdata,
			(unsigned long) counter->usrdata);
	counter->usrdata = oldirqdata;

	if (ctx->task)
		spin_unlock(&ctx->lock);
}

static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;
	struct task_struct *task = ctx->task;

	if (!task) {
		smp_call_function_single(counter->cpu,
					 __perf_switch_irq_data,
					 counter, 1);
		return counter->usrdata;
	}

retry:
	spin_lock_irq(&ctx->lock);
	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
		counter->irqdata = counter->usrdata;
		counter->usrdata = oldirqdata;
		spin_unlock_irq(&ctx->lock);
		return oldirqdata;
	}
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_switch_irq_data, counter);
	/* Might have failed, because task was scheduled out */
	if (counter->irqdata == oldirqdata)
		goto retry;

	return counter->usrdata;
}

static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}

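/*
 * Counters are freed after an RCU grace period so that lock-free
 * walkers of ctx->event_list never see one disappear under them.
 */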
static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

static void free_counter(struct perf_counter *counter)
{
	if (counter->destroy)
		counter->destroy(counter);

	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&ctx->mutex);
	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);

	mutex_unlock(&counter->mutex);
	mutex_unlock(&ctx->mutex);

	free_counter(counter);
	put_context(ctx);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 cntval;

	if (count != sizeof(cntval))
		return -EINVAL;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	mutex_lock(&counter->mutex);
	cntval = perf_counter_read(counter);
	mutex_unlock(&counter->mutex);

	return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}

static ssize_t
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
{
	if (!usrdata->len)
		return 0;

	count = min(count, (size_t)usrdata->len);
	if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
		return -EFAULT;

	/* Adjust the counters */
	usrdata->len -= count;
	if (!usrdata->len)
		usrdata->rd_idx = 0;
	else
		usrdata->rd_idx += count;

	return count;
}

static ssize_t
perf_read_irq_data(struct perf_counter	*counter,
		   char __user		*buf,
		   size_t		count,
		   int			nonblocking)
{
	struct perf_data *irqdata, *usrdata;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t res, res2;

	irqdata = counter->irqdata;
	usrdata = counter->usrdata;

	if (usrdata->len + irqdata->len >= count)
		goto read_pending;

	if (nonblocking)
		return -EAGAIN;

	spin_lock_irq(&counter->waitq.lock);
	__add_wait_queue(&counter->waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (usrdata->len + irqdata->len >= count)
			break;

		if (signal_pending(current))
			break;

		if (counter->state == PERF_COUNTER_STATE_ERROR)
			break;

		spin_unlock_irq(&counter->waitq.lock);
		schedule();
		spin_lock_irq(&counter->waitq.lock);
	}
	__remove_wait_queue(&counter->waitq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&counter->waitq.lock);

	if (usrdata->len + irqdata->len < count &&
	    counter->state != PERF_COUNTER_STATE_ERROR)
		return -ERESTARTSYS;
read_pending:
	mutex_lock(&counter->mutex);

	/* Drain pending data first: */
	res = perf_copy_usrdata(usrdata, buf, count);
	if (res < 0 || res == count)
		goto out;

	/* Switch irq buffer: */
	usrdata = perf_switch_irq_data(counter);
	res2 = perf_copy_usrdata(usrdata, buf + res, count - res);
	if (res2 < 0) {
		if (!res)
			res = -EFAULT;
	} else {
		res += res2;
	}
out:
	mutex_unlock(&counter->mutex);

	return res;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return perf_read_hw(counter, buf, count);

	case PERF_RECORD_IRQ:
	case PERF_RECORD_GROUP:
		return perf_read_irq_data(counter, buf, count,
					  file->f_flags & O_NONBLOCK);
	}
	return -EINVAL;
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &counter->waitq, wait);

	spin_lock_irqsave(&counter->waitq.lock, flags);
	if (counter->usrdata->len || counter->irqdata->len)
		events |= POLLIN;
	spin_unlock_irqrestore(&counter->waitq.lock, flags);

	return events;
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	int err = 0;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		perf_counter_enable_family(counter);
		break;
	case PERF_COUNTER_IOC_DISABLE:
		perf_counter_disable_family(counter);
		break;
	default:
		err = -ENOTTY;
	}
	return err;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
};

/*
 * Output
 */

static void perf_counter_store_irq(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

static void perf_counter_handle_group(struct perf_counter *counter)
{
	struct perf_counter *leader, *sub;

	leader = counter->group_leader;
	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
		if (sub != counter)
			sub->hw_ops->read(sub);
		perf_counter_store_irq(counter, sub->hw_event.config);
		perf_counter_store_irq(counter, atomic64_read(&sub->count));
	}
}

void perf_counter_output(struct perf_counter *counter,
			 int nmi, struct pt_regs *regs)
{
	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return;

	case PERF_RECORD_IRQ:
		perf_counter_store_irq(counter, instruction_pointer(regs));
		break;

	case PERF_RECORD_GROUP:
		perf_counter_handle_group(counter);
		break;
	}

	if (nmi) {
		counter->wakeup_pending = 1;
		set_perf_counter_pending();
	} else
		wake_up(&counter->waitq);
}

/*
 * Generic software counter infrastructure
 */

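/*
 * Fold the events accumulated in hwc->count since the last update into
 * counter->count and the remaining sampling period.
 */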
static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

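/*
 * Re-arm the sampling period: bias hwc->count and hwc->prev_count so
 * the counter turns non-negative again once the remaining period has
 * elapsed.
 */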
static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}

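/*
 * hrtimer callback for the timer-based software counters: read the
 * counter, emit an output record and forward the timer by one period.
 */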
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	struct perf_counter *counter;
	struct pt_regs *regs;

	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->hw_ops->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs)
		perf_counter_output(counter, 0, regs);

	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));

	return HRTIMER_RESTART;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	perf_counter_output(counter, nmi, regs);
}

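/*
 * Decide whether a software event of the given type/id, raised in user
 * or kernel mode, should be counted by this counter.
 */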
static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	if (perf_event_raw(&counter->hw_event))
		return 0;

	if (perf_event_type(&counter->hw_event) != type)
		return 0;

	if (perf_event_id(&counter->hw_event) != event)
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs);
}

static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs);
	}
	rcu_read_unlock();
}

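/*
 * Pick the per-cpu recursion counter for the current context level
 * (NMI, hardirq, softirq or process); events raised while one is
 * already being processed at that level are dropped.
 */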
Peter Zijlstra96f6d442009-03-23 18:22:07 +01001536static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
1537{
1538 if (in_nmi())
1539 return &cpuctx->recursion[3];
1540
1541 if (in_irq())
1542 return &cpuctx->recursion[2];
1543
1544 if (in_softirq())
1545 return &cpuctx->recursion[1];
1546
1547 return &cpuctx->recursion[0];
1548}
1549
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001550static void __perf_swcounter_event(enum perf_event_types type, u32 event,
1551 u64 nr, int nmi, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001552{
1553 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra96f6d442009-03-23 18:22:07 +01001554 int *recursion = perf_swcounter_recursion_context(cpuctx);
1555
1556 if (*recursion)
1557 goto out;
1558
1559 (*recursion)++;
1560 barrier();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001561
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001562 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
1563 if (cpuctx->task_ctx) {
1564 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
1565 nr, nmi, regs);
1566 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001567
Peter Zijlstra96f6d442009-03-23 18:22:07 +01001568 barrier();
1569 (*recursion)--;
1570
1571out:
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001572 put_cpu_var(perf_cpu_context);
1573}
1574
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001575void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
1576{
1577 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
1578}
1579
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001580static void perf_swcounter_read(struct perf_counter *counter)
1581{
1582 perf_swcounter_update(counter);
1583}
1584
1585static int perf_swcounter_enable(struct perf_counter *counter)
1586{
1587 perf_swcounter_set_period(counter);
1588 return 0;
1589}
1590
1591static void perf_swcounter_disable(struct perf_counter *counter)
1592{
1593 perf_swcounter_update(counter);
1594}
1595
Peter Zijlstraac17dc82009-03-13 12:21:34 +01001596static const struct hw_perf_counter_ops perf_ops_generic = {
1597 .enable = perf_swcounter_enable,
1598 .disable = perf_swcounter_disable,
1599 .read = perf_swcounter_read,
1600};
1601
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001602/*
1603 * Software counter: cpu wall time clock
1604 */
1605
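/*
 * The cpu clock counter accumulates deltas of cpu_clock(): prev_count
 * holds the timestamp of the previous update and the difference to the
 * current timestamp is added to the counter value.
 */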
Paul Mackerras9abf8a02009-01-09 16:26:43 +11001606static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1607{
1608 int cpu = raw_smp_processor_id();
1609 s64 prev;
1610 u64 now;
1611
1612 now = cpu_clock(cpu);
1613 prev = atomic64_read(&counter->hw.prev_count);
1614 atomic64_set(&counter->hw.prev_count, now);
1615 atomic64_add(now - prev, &counter->count);
1616}
1617
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001618static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1619{
1620 struct hw_perf_counter *hwc = &counter->hw;
1621 int cpu = raw_smp_processor_id();
1622
1623 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01001624 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1625 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001626 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001627 __hrtimer_start_range_ns(&hwc->hrtimer,
1628 ns_to_ktime(hwc->irq_period), 0,
1629 HRTIMER_MODE_REL, 0);
1630 }
1631
1632 return 0;
1633}
1634
Ingo Molnar5c92d122008-12-11 13:21:10 +01001635static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1636{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001637 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11001638 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01001639}
1640
1641static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1642{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11001643 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01001644}
1645
1646static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01001647 .enable = cpu_clock_perf_counter_enable,
1648 .disable = cpu_clock_perf_counter_disable,
1649 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01001650};
1651
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001652/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001653 * Software counter: task time clock
1654 */
1655
1656/*
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001657 * Called from within the scheduler:
1658 */
1659static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
Ingo Molnarbae43c92008-12-11 14:03:20 +01001660{
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001661 struct task_struct *curr = counter->task;
1662 u64 delta;
1663
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001664 delta = __task_delta_exec(curr, update);
1665
1666 return curr->se.sum_exec_runtime + delta;
1667}
1668
1669static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1670{
1671 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001672 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01001673
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001674 prev = atomic64_read(&counter->hw.prev_count);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001675
1676 atomic64_set(&counter->hw.prev_count, now);
1677
1678 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001679
1680 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01001681}
1682
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001683static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001684{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001685 struct hw_perf_counter *hwc = &counter->hw;
1686
1687 atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
Peter Zijlstra039fc912009-03-13 16:43:47 +01001688 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1689 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001690 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001691 __hrtimer_start_range_ns(&hwc->hrtimer,
1692 ns_to_ktime(hwc->irq_period), 0,
1693 HRTIMER_MODE_REL, 0);
1694 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001695
1696 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01001697}
1698
1699static void task_clock_perf_counter_disable(struct perf_counter *counter)
1700{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001701 hrtimer_cancel(&counter->hw.hrtimer);
1702 task_clock_perf_counter_update(counter,
1703 task_clock_perf_counter_val(counter, 0));
1704}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01001705
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001706static void task_clock_perf_counter_read(struct perf_counter *counter)
1707{
1708 task_clock_perf_counter_update(counter,
1709 task_clock_perf_counter_val(counter, 1));
Ingo Molnarbae43c92008-12-11 14:03:20 +01001710}
1711
1712static const struct hw_perf_counter_ops perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01001713 .enable = task_clock_perf_counter_enable,
1714 .disable = task_clock_perf_counter_disable,
1715 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01001716};
1717
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001718/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001719 * Software counter: cpu migrations
1720 */
1721
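/*
 * For a per-task counter the migration count is read from the task
 * itself; for a per-cpu counter (ctx->task == NULL) the CPU-wide
 * migration count maintained by the scheduler is used instead.
 */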
Paul Mackerras23a185c2009-02-09 22:42:47 +11001722static inline u64 get_cpu_migrations(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01001723{
Paul Mackerras23a185c2009-02-09 22:42:47 +11001724 struct task_struct *curr = counter->ctx->task;
1725
1726 if (curr)
1727 return curr->se.nr_migrations;
1728 return cpu_nr_migrations(smp_processor_id());
Ingo Molnar6c594c22008-12-14 12:34:15 +01001729}
1730
1731static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1732{
1733 u64 prev, now;
1734 s64 delta;
1735
1736 prev = atomic64_read(&counter->hw.prev_count);
Paul Mackerras23a185c2009-02-09 22:42:47 +11001737 now = get_cpu_migrations(counter);
Ingo Molnar6c594c22008-12-14 12:34:15 +01001738
1739 atomic64_set(&counter->hw.prev_count, now);
1740
1741 delta = now - prev;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001742
1743 atomic64_add(delta, &counter->count);
1744}
1745
1746static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1747{
1748 cpu_migrations_perf_counter_update(counter);
1749}
1750
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001751static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01001752{
Paul Mackerrasc07c99b2009-02-13 22:10:34 +11001753 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1754 atomic64_set(&counter->hw.prev_count,
1755 get_cpu_migrations(counter));
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001756 return 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001757}
1758
1759static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1760{
1761 cpu_migrations_perf_counter_update(counter);
1762}
1763
1764static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
Ingo Molnar76715812008-12-17 14:20:28 +01001765 .enable = cpu_migrations_perf_counter_enable,
1766 .disable = cpu_migrations_perf_counter_disable,
1767 .read = cpu_migrations_perf_counter_read,
Ingo Molnar6c594c22008-12-14 12:34:15 +01001768};
1769
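/*
 * Tracepoint counters: with CONFIG_EVENT_PROFILE enabled, ftrace
 * profiling is switched on for the requested event id and every hit is
 * fed into the generic software counter path above.
 */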
Peter Zijlstrae077df42009-03-19 20:26:17 +01001770#ifdef CONFIG_EVENT_PROFILE
1771void perf_tpcounter_event(int event_id)
1772{
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001773 struct pt_regs *regs = get_irq_regs();
1774
1775 if (!regs)
1776 regs = task_pt_regs(current);
1777
1778 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
Peter Zijlstrae077df42009-03-19 20:26:17 +01001779}
1780
1781extern int ftrace_profile_enable(int);
1782extern void ftrace_profile_disable(int);
1783
1784static void tp_perf_counter_destroy(struct perf_counter *counter)
1785{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001786 ftrace_profile_disable(perf_event_id(&counter->hw_event));
Peter Zijlstrae077df42009-03-19 20:26:17 +01001787}
1788
1789static const struct hw_perf_counter_ops *
1790tp_perf_counter_init(struct perf_counter *counter)
1791{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001792 int event_id = perf_event_id(&counter->hw_event);
Peter Zijlstrae077df42009-03-19 20:26:17 +01001793 int ret;
1794
1795 ret = ftrace_profile_enable(event_id);
1796 if (ret)
1797 return NULL;
1798
1799 counter->destroy = tp_perf_counter_destroy;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001800 counter->hw.irq_period = counter->hw_event.irq_period;
Peter Zijlstrae077df42009-03-19 20:26:17 +01001801
1802 return &perf_ops_generic;
1803}
1804#else
1805static const struct hw_perf_counter_ops *
1806tp_perf_counter_init(struct perf_counter *counter)
1807{
1808 return NULL;
1809}
1810#endif
1811
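/*
 * Map a software event id to the hw_perf_counter_ops implementing it:
 * the clock counters get dedicated hrtimer-driven ops and a minimum
 * sampling period of 10000 ns, page faults and context switches use the
 * generic swcounter ops and are driven by perf_swcounter_event() callers
 * elsewhere in the kernel, and cpu migrations keep their own ops (only
 * when kernel events are not excluded).
 */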
Ingo Molnar5c92d122008-12-11 13:21:10 +01001812static const struct hw_perf_counter_ops *
1813sw_perf_counter_init(struct perf_counter *counter)
1814{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001815 struct perf_counter_hw_event *hw_event = &counter->hw_event;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001816 const struct hw_perf_counter_ops *hw_ops = NULL;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001817 struct hw_perf_counter *hwc = &counter->hw;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001818
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001819 /*
1820 * Software counters (currently) can't in general distinguish
1821 * between user, kernel and hypervisor events.
1822 * However, context switches and cpu migrations are considered
1823 * to be kernel events, and page faults are never hypervisor
1824 * events.
1825 */
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001826 switch (perf_event_id(&counter->hw_event)) {
Ingo Molnar5c92d122008-12-11 13:21:10 +01001827 case PERF_COUNT_CPU_CLOCK:
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001828 hw_ops = &perf_ops_cpu_clock;
1829
1830 if (hw_event->irq_period && hw_event->irq_period < 10000)
1831 hw_event->irq_period = 10000;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001832 break;
Ingo Molnarbae43c92008-12-11 14:03:20 +01001833 case PERF_COUNT_TASK_CLOCK:
Paul Mackerras23a185c2009-02-09 22:42:47 +11001834 /*
1835 * If the user instantiates this as a per-cpu counter,
1836 * use the cpu_clock counter instead.
1837 */
1838 if (counter->ctx->task)
1839 hw_ops = &perf_ops_task_clock;
1840 else
1841 hw_ops = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01001842
1843 if (hw_event->irq_period && hw_event->irq_period < 10000)
1844 hw_event->irq_period = 10000;
Ingo Molnarbae43c92008-12-11 14:03:20 +01001845 break;
Ingo Molnare06c61a2008-12-14 14:44:31 +01001846 case PERF_COUNT_PAGE_FAULTS:
Peter Zijlstraac17dc82009-03-13 12:21:34 +01001847 case PERF_COUNT_PAGE_FAULTS_MIN:
1848 case PERF_COUNT_PAGE_FAULTS_MAJ:
Ingo Molnar5d6a27d2008-12-14 12:28:33 +01001849 case PERF_COUNT_CONTEXT_SWITCHES:
Peter Zijlstra4a0deca2009-03-19 20:26:12 +01001850 hw_ops = &perf_ops_generic;
Ingo Molnar5d6a27d2008-12-14 12:28:33 +01001851 break;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001852 case PERF_COUNT_CPU_MIGRATIONS:
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001853 if (!counter->hw_event.exclude_kernel)
1854 hw_ops = &perf_ops_cpu_migrations;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001855 break;
Ingo Molnar5c92d122008-12-11 13:21:10 +01001856 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001857
1858 if (hw_ops)
1859 hwc->irq_period = hw_event->irq_period;
1860
Ingo Molnar5c92d122008-12-11 13:21:10 +01001861 return hw_ops;
1862}
1863
Thomas Gleixner0793a612008-12-04 20:12:29 +01001864/*
1865 * Allocate and initialize a counter structure
1866 */
1867static struct perf_counter *
Ingo Molnar04289bb2008-12-11 08:38:42 +01001868perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1869 int cpu,
Paul Mackerras23a185c2009-02-09 22:42:47 +11001870 struct perf_counter_context *ctx,
Ingo Molnar9b51f662008-12-12 13:49:45 +01001871 struct perf_counter *group_leader,
1872 gfp_t gfpflags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001873{
Ingo Molnar5c92d122008-12-11 13:21:10 +01001874 const struct hw_perf_counter_ops *hw_ops;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001875 struct perf_counter *counter;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001876
Ingo Molnar9b51f662008-12-12 13:49:45 +01001877 counter = kzalloc(sizeof(*counter), gfpflags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001878 if (!counter)
1879 return NULL;
1880
Ingo Molnar04289bb2008-12-11 08:38:42 +01001881 /*
1882 * Single counters are their own group leaders, with an
1883 * empty sibling list:
1884 */
1885 if (!group_leader)
1886 group_leader = counter;
1887
Thomas Gleixner0793a612008-12-04 20:12:29 +01001888 mutex_init(&counter->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001889 INIT_LIST_HEAD(&counter->list_entry);
Peter Zijlstra592903c2009-03-13 12:21:36 +01001890 INIT_LIST_HEAD(&counter->event_entry);
Ingo Molnar04289bb2008-12-11 08:38:42 +01001891 INIT_LIST_HEAD(&counter->sibling_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01001892 init_waitqueue_head(&counter->waitq);
1893
Paul Mackerrasd859e292009-01-17 18:10:22 +11001894 INIT_LIST_HEAD(&counter->child_list);
1895
Ingo Molnar9f66a382008-12-10 12:33:23 +01001896 counter->irqdata = &counter->data[0];
1897 counter->usrdata = &counter->data[1];
1898 counter->cpu = cpu;
1899 counter->hw_event = *hw_event;
1900 counter->wakeup_pending = 0;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001901 counter->group_leader = group_leader;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001902 counter->hw_ops = NULL;
Paul Mackerras23a185c2009-02-09 22:42:47 +11001903 counter->ctx = ctx;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001904
Ingo Molnar235c7fc2008-12-21 14:43:25 +01001905 counter->state = PERF_COUNTER_STATE_INACTIVE;
Ingo Molnara86ed502008-12-17 00:43:10 +01001906 if (hw_event->disabled)
1907 counter->state = PERF_COUNTER_STATE_OFF;
1908
Ingo Molnar5c92d122008-12-11 13:21:10 +01001909 hw_ops = NULL;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001910
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001911 if (perf_event_raw(hw_event)) {
Ingo Molnar5c92d122008-12-11 13:21:10 +01001912 hw_ops = hw_perf_counter_init(counter);
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001913 goto done;
1914 }
1915
1916 switch (perf_event_type(hw_event)) {
Peter Zijlstrab8e83512009-03-19 20:26:18 +01001917 case PERF_TYPE_HARDWARE:
1918 hw_ops = hw_perf_counter_init(counter);
1919 break;
1920
1921 case PERF_TYPE_SOFTWARE:
1922 hw_ops = sw_perf_counter_init(counter);
1923 break;
1924
1925 case PERF_TYPE_TRACEPOINT:
1926 hw_ops = tp_perf_counter_init(counter);
1927 break;
1928 }
Ingo Molnar5c92d122008-12-11 13:21:10 +01001929
Ingo Molnar621a01e2008-12-11 12:46:46 +01001930 if (!hw_ops) {
1931 kfree(counter);
1932 return NULL;
1933 }
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01001934done:
Ingo Molnar621a01e2008-12-11 12:46:46 +01001935 counter->hw_ops = hw_ops;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001936
1937 return counter;
1938}
1939
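/*
 * Rough user-space usage sketch (illustrative only; it assumes the
 * architecture has wired up __NR_perf_counter_open):
 *
 *	struct perf_counter_hw_event hw_event;
 *	u64 count;
 *	int fd;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	... fill in the event configuration, see <linux/perf_counter.h> ...
 *	fd = syscall(__NR_perf_counter_open, &hw_event, getpid(), -1, -1, 0);
 *	read(fd, &count, sizeof(count));
 *
 * The returned fd is backed by an anonymous inode; the counter value can
 * be read() from it and the counter is released once the last reference
 * to the fd is dropped.
 */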
1940/**
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001941 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
Ingo Molnar9f66a382008-12-10 12:33:23 +01001942 *
1943 * @hw_event_uptr: event type attributes for monitoring/sampling
Thomas Gleixner0793a612008-12-04 20:12:29 +01001944 * @pid: target pid
Ingo Molnar9f66a382008-12-10 12:33:23 +01001945 * @cpu: target cpu
1946 * @group_fd: group leader counter fd
Thomas Gleixner0793a612008-12-04 20:12:29 +01001947 */
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001948SYSCALL_DEFINE5(perf_counter_open,
Paul Mackerrasf3dfd262009-02-26 22:43:46 +11001949 const struct perf_counter_hw_event __user *, hw_event_uptr,
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001950 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01001951{
Ingo Molnar04289bb2008-12-11 08:38:42 +01001952 struct perf_counter *counter, *group_leader;
Ingo Molnar9f66a382008-12-10 12:33:23 +01001953 struct perf_counter_hw_event hw_event;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001954 struct perf_counter_context *ctx;
Ingo Molnar9b51f662008-12-12 13:49:45 +01001955 struct file *counter_file = NULL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001956 struct file *group_file = NULL;
1957 int fput_needed = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01001958 int fput_needed2 = 0;
Thomas Gleixner0793a612008-12-04 20:12:29 +01001959 int ret;
1960
Paul Mackerras2743a5b2009-03-04 20:36:51 +11001961 /* for future expandability... */
1962 if (flags)
1963 return -EINVAL;
1964
Ingo Molnar9f66a382008-12-10 12:33:23 +01001965 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
Thomas Gleixnereab656a2008-12-08 19:26:59 +01001966 return -EFAULT;
1967
Ingo Molnar04289bb2008-12-11 08:38:42 +01001968 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01001969 * Get the target context (task or percpu):
1970 */
1971 ctx = find_get_context(pid, cpu);
1972 if (IS_ERR(ctx))
1973 return PTR_ERR(ctx);
1974
1975 /*
1976 * Look up the group leader (we will attach this counter to it):
Ingo Molnar04289bb2008-12-11 08:38:42 +01001977 */
1978 group_leader = NULL;
1979 if (group_fd != -1) {
1980 ret = -EINVAL;
1981 group_file = fget_light(group_fd, &fput_needed);
1982 if (!group_file)
Ingo Molnarccff2862008-12-11 11:26:29 +01001983 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001984 if (group_file->f_op != &perf_fops)
Ingo Molnarccff2862008-12-11 11:26:29 +01001985 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01001986
1987 group_leader = group_file->private_data;
1988 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01001989 * Do not allow a recursive hierarchy (this new sibling
1990 * becoming part of another group-sibling):
Ingo Molnar04289bb2008-12-11 08:38:42 +01001991 */
Ingo Molnarccff2862008-12-11 11:26:29 +01001992 if (group_leader->group_leader != group_leader)
1993 goto err_put_context;
1994 /*
1995	 * Do not allow attaching to a group in a different
1996 * task or CPU context:
1997 */
1998 if (group_leader->ctx != ctx)
1999 goto err_put_context;
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11002000 /*
2001 * Only a group leader can be exclusive or pinned
2002 */
2003 if (hw_event.exclusive || hw_event.pinned)
2004 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002005 }
2006
Ingo Molnar5c92d122008-12-11 13:21:10 +01002007 ret = -EINVAL;
Paul Mackerras23a185c2009-02-09 22:42:47 +11002008 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2009 GFP_KERNEL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002010 if (!counter)
2011 goto err_put_context;
2012
Thomas Gleixner0793a612008-12-04 20:12:29 +01002013 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2014 if (ret < 0)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002015 goto err_free_put_context;
2016
2017 counter_file = fget_light(ret, &fput_needed2);
2018 if (!counter_file)
2019 goto err_free_put_context;
2020
2021 counter->filp = counter_file;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002022 mutex_lock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002023 perf_install_in_context(ctx, counter, cpu);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002024 mutex_unlock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002025
2026 fput_light(counter_file, fput_needed2);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002027
Ingo Molnar04289bb2008-12-11 08:38:42 +01002028out_fput:
2029 fput_light(group_file, fput_needed);
2030
Thomas Gleixner0793a612008-12-04 20:12:29 +01002031 return ret;
2032
Ingo Molnar9b51f662008-12-12 13:49:45 +01002033err_free_put_context:
Thomas Gleixner0793a612008-12-04 20:12:29 +01002034 kfree(counter);
2035
2036err_put_context:
2037 put_context(ctx);
2038
Ingo Molnar04289bb2008-12-11 08:38:42 +01002039 goto out_fput;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002040}
2041
Ingo Molnar9b51f662008-12-12 13:49:45 +01002042/*
2043 * Initialize the perf_counter context in a task_struct:
2044 */
2045static void
2046__perf_counter_init_context(struct perf_counter_context *ctx,
2047 struct task_struct *task)
2048{
2049 memset(ctx, 0, sizeof(*ctx));
2050 spin_lock_init(&ctx->lock);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002051 mutex_init(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002052 INIT_LIST_HEAD(&ctx->counter_list);
Peter Zijlstra592903c2009-03-13 12:21:36 +01002053 INIT_LIST_HEAD(&ctx->event_list);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002054 ctx->task = task;
2055}
2056
2057/*
2058 * inherit a counter from parent task to child task:
2059 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002060static struct perf_counter *
Ingo Molnar9b51f662008-12-12 13:49:45 +01002061inherit_counter(struct perf_counter *parent_counter,
2062 struct task_struct *parent,
2063 struct perf_counter_context *parent_ctx,
2064 struct task_struct *child,
Paul Mackerrasd859e292009-01-17 18:10:22 +11002065 struct perf_counter *group_leader,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002066 struct perf_counter_context *child_ctx)
2067{
2068 struct perf_counter *child_counter;
2069
Paul Mackerrasd859e292009-01-17 18:10:22 +11002070 /*
2071 * Instead of creating recursive hierarchies of counters,
2072 * we link inherited counters back to the original parent,
2074	 * which is guaranteed to have a filp, and whose filp we use
2075	 * for reference counting:
2075 */
2076 if (parent_counter->parent)
2077 parent_counter = parent_counter->parent;
2078
Ingo Molnar9b51f662008-12-12 13:49:45 +01002079 child_counter = perf_counter_alloc(&parent_counter->hw_event,
Paul Mackerras23a185c2009-02-09 22:42:47 +11002080 parent_counter->cpu, child_ctx,
2081 group_leader, GFP_KERNEL);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002082 if (!child_counter)
Paul Mackerrasd859e292009-01-17 18:10:22 +11002083 return NULL;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002084
2085 /*
2086 * Link it up in the child's context:
2087 */
Ingo Molnar9b51f662008-12-12 13:49:45 +01002088 child_counter->task = child;
2089 list_add_counter(child_counter, child_ctx);
2090 child_ctx->nr_counters++;
2091
2092 child_counter->parent = parent_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002093 /*
2094 * inherit into child's child as well:
2095 */
2096 child_counter->hw_event.inherit = 1;
2097
2098 /*
2099 * Get a reference to the parent filp - we will fput it
2100 * when the child counter exits. This is safe to do because
2101 * we are in the parent and we know that the filp still
2102 * exists and has a nonzero count:
2103 */
2104 atomic_long_inc(&parent_counter->filp->f_count);
2105
Paul Mackerrasd859e292009-01-17 18:10:22 +11002106 /*
2107 * Link this into the parent counter's child list
2108 */
2109 mutex_lock(&parent_counter->mutex);
2110 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2111
2112 /*
2113 * Make the child state follow the state of the parent counter,
2114 * not its hw_event.disabled bit. We hold the parent's mutex,
2115 * so we won't race with perf_counter_{en,dis}able_family.
2116 */
2117 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2118 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2119 else
2120 child_counter->state = PERF_COUNTER_STATE_OFF;
2121
2122 mutex_unlock(&parent_counter->mutex);
2123
2124 return child_counter;
2125}
2126
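/*
 * Inherit a whole counter group: clone the group leader first, then each
 * sibling, attaching the clones to the new leader in the child context.
 * Any allocation failure aborts the inheritance with -ENOMEM.
 */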
2127static int inherit_group(struct perf_counter *parent_counter,
2128 struct task_struct *parent,
2129 struct perf_counter_context *parent_ctx,
2130 struct task_struct *child,
2131 struct perf_counter_context *child_ctx)
2132{
2133 struct perf_counter *leader;
2134 struct perf_counter *sub;
2135
2136 leader = inherit_counter(parent_counter, parent, parent_ctx,
2137 child, NULL, child_ctx);
2138 if (!leader)
2139 return -ENOMEM;
2140 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2141 if (!inherit_counter(sub, parent, parent_ctx,
2142 child, leader, child_ctx))
2143 return -ENOMEM;
2144 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01002145 return 0;
2146}
2147
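/*
 * Fold the final count of an exiting child counter back into its parent,
 * unlink it from the parent's child list and drop the reference that was
 * taken on the parent's filp when the counter was inherited.
 */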
Paul Mackerrasd859e292009-01-17 18:10:22 +11002148static void sync_child_counter(struct perf_counter *child_counter,
2149 struct perf_counter *parent_counter)
2150{
2151 u64 parent_val, child_val;
2152
2153 parent_val = atomic64_read(&parent_counter->count);
2154 child_val = atomic64_read(&child_counter->count);
2155
2156 /*
2157 * Add back the child's count to the parent's count:
2158 */
2159 atomic64_add(child_val, &parent_counter->count);
2160
2161 /*
2162 * Remove this counter from the parent's list
2163 */
2164 mutex_lock(&parent_counter->mutex);
2165 list_del_init(&child_counter->child_list);
2166 mutex_unlock(&parent_counter->mutex);
2167
2168 /*
2169 * Release the parent counter, if this was the last
2170 * reference to it.
2171 */
2172 fput(parent_counter->filp);
2173}
2174
Ingo Molnar9b51f662008-12-12 13:49:45 +01002175static void
2176__perf_counter_exit_task(struct task_struct *child,
2177 struct perf_counter *child_counter,
2178 struct perf_counter_context *child_ctx)
2179{
2180 struct perf_counter *parent_counter;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002181 struct perf_counter *sub, *tmp;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002182
2183 /*
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002184 * If we do not self-reap then we have to wait for the
2185	 * child task to unschedule (which is guaranteed to happen),
2186 * so that its counter is at its final count. (This
2187 * condition triggers rarely - child tasks usually get
2188 * off their CPU before the parent has a chance to
2189 * get this far into the reaping action)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002190 */
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002191 if (child != current) {
2192 wait_task_inactive(child, 0);
2193 list_del_init(&child_counter->list_entry);
2194 } else {
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002195 struct perf_cpu_context *cpuctx;
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002196 unsigned long flags;
2197 u64 perf_flags;
2198
2199 /*
2200 * Disable and unlink this counter.
2201 *
2202 * Be careful about zapping the list - IRQ/NMI context
2203 * could still be processing it:
2204 */
2205 curr_rq_lock_irq_save(&flags);
2206 perf_flags = hw_perf_save_disable();
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002207
2208 cpuctx = &__get_cpu_var(perf_cpu_context);
2209
Paul Mackerrasd859e292009-01-17 18:10:22 +11002210 group_sched_out(child_counter, cpuctx, child_ctx);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002211
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002212 list_del_init(&child_counter->list_entry);
2213
2214 child_ctx->nr_counters--;
2215
2216 hw_perf_restore(perf_flags);
2217 curr_rq_unlock_irq_restore(&flags);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002218 }
2219
Ingo Molnar9b51f662008-12-12 13:49:45 +01002220 parent_counter = child_counter->parent;
2221 /*
2222	 * It can happen that the parent exits first, and has counters
2223 * that are still around due to the child reference. These
2224 * counters need to be zapped - but otherwise linger.
2225 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002226 if (parent_counter) {
2227 sync_child_counter(child_counter, parent_counter);
2228 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2229 list_entry) {
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002230 if (sub->parent) {
Paul Mackerrasd859e292009-01-17 18:10:22 +11002231 sync_child_counter(sub, sub->parent);
Peter Zijlstraf1600952009-03-19 20:26:16 +01002232 free_counter(sub);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002233 }
Paul Mackerrasd859e292009-01-17 18:10:22 +11002234 }
Peter Zijlstraf1600952009-03-19 20:26:16 +01002235 free_counter(child_counter);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002236 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01002237}
2238
2239/*
Paul Mackerrasd859e292009-01-17 18:10:22 +11002240 * When a child task exits, feed back counter values to parent counters.
Ingo Molnar9b51f662008-12-12 13:49:45 +01002241 *
Paul Mackerrasd859e292009-01-17 18:10:22 +11002242 * Note: we may be running in child context, but the PID is not hashed
Ingo Molnar9b51f662008-12-12 13:49:45 +01002243 * anymore so new counters will not be added.
2244 */
2245void perf_counter_exit_task(struct task_struct *child)
2246{
2247 struct perf_counter *child_counter, *tmp;
2248 struct perf_counter_context *child_ctx;
2249
2250 child_ctx = &child->perf_counter_ctx;
2251
2252 if (likely(!child_ctx->nr_counters))
2253 return;
2254
2255 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2256 list_entry)
2257 __perf_counter_exit_task(child, child_counter, child_ctx);
2258}
2259
2260/*
2261 * Initialize the perf_counter context in task_struct
2262 */
2263void perf_counter_init_task(struct task_struct *child)
2264{
2265 struct perf_counter_context *child_ctx, *parent_ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002266 struct perf_counter *counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002267 struct task_struct *parent = current;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002268
2269 child_ctx = &child->perf_counter_ctx;
2270 parent_ctx = &parent->perf_counter_ctx;
2271
2272 __perf_counter_init_context(child_ctx, child);
2273
2274 /*
2275 * This is executed from the parent task context, so inherit
2276 * counters that have been marked for cloning:
2277 */
2278
2279 if (likely(!parent_ctx->nr_counters))
2280 return;
2281
2282 /*
2283 * Lock the parent list. No need to lock the child - not PID
2284 * hashed yet and not running, so nobody can access it.
2285 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002286 mutex_lock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002287
2288 /*
2289	 * We don't have to disable NMIs - we are only looking at
2290 * the list, not manipulating it:
2291 */
2292 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
Paul Mackerrasd859e292009-01-17 18:10:22 +11002293 if (!counter->hw_event.inherit)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002294 continue;
2295
Paul Mackerrasd859e292009-01-17 18:10:22 +11002296 if (inherit_group(counter, parent,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002297 parent_ctx, child, child_ctx))
2298 break;
2299 }
2300
Paul Mackerrasd859e292009-01-17 18:10:22 +11002301 mutex_unlock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002302}
2303
Ingo Molnar04289bb2008-12-11 08:38:42 +01002304static void __cpuinit perf_counter_init_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002305{
Ingo Molnar04289bb2008-12-11 08:38:42 +01002306 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002307
Ingo Molnar04289bb2008-12-11 08:38:42 +01002308 cpuctx = &per_cpu(perf_cpu_context, cpu);
2309 __perf_counter_init_context(&cpuctx->ctx, NULL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002310
2311 mutex_lock(&perf_resource_mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002312 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002313 mutex_unlock(&perf_resource_mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002314
Paul Mackerras01d02872009-01-14 13:44:19 +11002315 hw_perf_counter_setup(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002316}
2317
2318#ifdef CONFIG_HOTPLUG_CPU
Ingo Molnar04289bb2008-12-11 08:38:42 +01002319static void __perf_counter_exit_cpu(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002320{
2321 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2322 struct perf_counter_context *ctx = &cpuctx->ctx;
2323 struct perf_counter *counter, *tmp;
2324
Ingo Molnar04289bb2008-12-11 08:38:42 +01002325 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2326 __perf_counter_remove_from_context(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002327}
Ingo Molnar04289bb2008-12-11 08:38:42 +01002328static void perf_counter_exit_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002329{
Paul Mackerrasd859e292009-01-17 18:10:22 +11002330 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2331 struct perf_counter_context *ctx = &cpuctx->ctx;
2332
2333 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002334 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002335 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002336}
2337#else
Ingo Molnar04289bb2008-12-11 08:38:42 +01002338static inline void perf_counter_exit_cpu(int cpu) { }
Thomas Gleixner0793a612008-12-04 20:12:29 +01002339#endif
2340
2341static int __cpuinit
2342perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2343{
2344 unsigned int cpu = (long)hcpu;
2345
2346 switch (action) {
2347
2348 case CPU_UP_PREPARE:
2349 case CPU_UP_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01002350 perf_counter_init_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002351 break;
2352
2353 case CPU_DOWN_PREPARE:
2354 case CPU_DOWN_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01002355 perf_counter_exit_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002356 break;
2357
2358 default:
2359 break;
2360 }
2361
2362 return NOTIFY_OK;
2363}
2364
2365static struct notifier_block __cpuinitdata perf_cpu_nb = {
2366 .notifier_call = perf_cpu_notify,
2367};
2368
2369static int __init perf_counter_init(void)
2370{
2371 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2372 (void *)(long)smp_processor_id());
2373 register_cpu_notifier(&perf_cpu_nb);
2374
2375 return 0;
2376}
2377early_initcall(perf_counter_init);
2378
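/*
 * sysfs interface: a "perf_counters" attribute group on the cpu sysdev
 * class (typically /sys/devices/system/cpu/perf_counters/) exposes the
 * reserve_percpu and overcommit tunables declared at the top of this
 * file.  Writing reserve_percpu recomputes each online CPU's max_pertask
 * limit under the resource mutex; overcommit is a plain 0/1 flag.
 */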
2379static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2380{
2381 return sprintf(buf, "%d\n", perf_reserved_percpu);
2382}
2383
2384static ssize_t
2385perf_set_reserve_percpu(struct sysdev_class *class,
2386 const char *buf,
2387 size_t count)
2388{
2389 struct perf_cpu_context *cpuctx;
2390 unsigned long val;
2391 int err, cpu, mpt;
2392
2393 err = strict_strtoul(buf, 10, &val);
2394 if (err)
2395 return err;
2396 if (val > perf_max_counters)
2397 return -EINVAL;
2398
2399 mutex_lock(&perf_resource_mutex);
2400 perf_reserved_percpu = val;
2401 for_each_online_cpu(cpu) {
2402 cpuctx = &per_cpu(perf_cpu_context, cpu);
2403 spin_lock_irq(&cpuctx->ctx.lock);
2404 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2405 perf_max_counters - perf_reserved_percpu);
2406 cpuctx->max_pertask = mpt;
2407 spin_unlock_irq(&cpuctx->ctx.lock);
2408 }
2409 mutex_unlock(&perf_resource_mutex);
2410
2411 return count;
2412}
2413
2414static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2415{
2416 return sprintf(buf, "%d\n", perf_overcommit);
2417}
2418
2419static ssize_t
2420perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2421{
2422 unsigned long val;
2423 int err;
2424
2425 err = strict_strtoul(buf, 10, &val);
2426 if (err)
2427 return err;
2428 if (val > 1)
2429 return -EINVAL;
2430
2431 mutex_lock(&perf_resource_mutex);
2432 perf_overcommit = val;
2433 mutex_unlock(&perf_resource_mutex);
2434
2435 return count;
2436}
2437
2438static SYSDEV_CLASS_ATTR(
2439 reserve_percpu,
2440 0644,
2441 perf_show_reserve_percpu,
2442 perf_set_reserve_percpu
2443 );
2444
2445static SYSDEV_CLASS_ATTR(
2446 overcommit,
2447 0644,
2448 perf_show_overcommit,
2449 perf_set_overcommit
2450 );
2451
2452static struct attribute *perfclass_attrs[] = {
2453 &attr_reserve_percpu.attr,
2454 &attr_overcommit.attr,
2455 NULL
2456};
2457
2458static struct attribute_group perfclass_attr_group = {
2459 .attrs = perfclass_attrs,
2460 .name = "perf_counters",
2461};
2462
2463static int __init perf_counter_sysfs_init(void)
2464{
2465 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2466 &perfclass_attr_group);
2467}
2468device_initcall(perf_counter_sysfs_init);