/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/dcache.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void) { return 0; }
void __weak hw_perf_restore(u64 ctrl) { barrier(); }
void __weak hw_perf_counter_setup(int cpu) { barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
				  struct perf_cpu_context *cpuctx,
				  struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void) { }

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

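/*
 * Take a counter off the CPU: mark it inactive, record the stop time,
 * call the hw_ops disable hook and update the context's active counts.
 */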
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

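/*
 * Schedule out a whole counter group: the group leader first, then all
 * of its siblings, clearing the CPU's exclusive flag if the group was
 * exclusive.
 */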
static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock_irqrestore(&ctx->lock, flags);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Disable a counter.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_disable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_disable(child);
	mutex_unlock(&counter->mutex);
}

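/*
 * Put a single counter on the CPU: mark it active and call the hw_ops
 * enable hook. Returns -EAGAIN if the hardware could not take it.
 */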
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

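/*
 * Add a counter to a context's list and initialise its timestamps to
 * the context's current time.
 */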
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	ctx->nr_counters++;
	counter->prev_state = PERF_COUNTER_STATE_OFF;
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	hw_perf_restore(perf_flags);

	spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);
	update_context_time(ctx);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx,
				       smp_processor_id());

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

unlock:
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Enable a counter.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
out:
	spin_unlock_irq(&ctx->lock);
}

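/*
 * Bump the counter's event limit by 'refresh' and (re)enable it.
 */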
static void perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);
}

/*
 * Enable a counter and all its children.
 */
static void perf_counter_enable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_enable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_enable(child);
	mutex_unlock(&counter->mutex);
}

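/*
 * Schedule out every active counter group of a context, with the
 * hardware globally disabled around the list walk.
 */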
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from the scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of the counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}

static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

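/*
 * Schedule in a counter group as a single unit: the group leader first,
 * then all of its siblings. On failure any partially scheduled members
 * are undone again.
 */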
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

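/*
 * Schedule in a context's counters: pinned groups go on first (and are
 * put into error state if they cannot go on), then the remaining groups
 * for as long as the hardware has room.
 */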
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	flags = hw_perf_save_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, 1))
			group_sched_in(counter, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		}
	}
	hw_perf_restore(flags);
out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from the scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of the counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

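/*
 * Turn off all counters of the current task.
 */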
int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	local_irq_save(flags);
	cpu = smp_processor_id();

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ERROR) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_OFF;
		}
	}

	hw_perf_restore(perf_flags);

	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}

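/*
 * Turn the current task's OFF counters back to inactive and schedule
 * them in again.
 */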
int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	local_irq_save(flags);
	cpu = smp_processor_id();

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state > PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	local_irq_restore(flags);

	return 0;
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	const int rotate_percpu = 0;

	if (rotate_percpu)
		perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	if (rotate_percpu)
		rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	if (rotate_percpu)
		perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->hw_ops->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}

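/*
 * Look up the counter context for a pid/cpu pair: the per-cpu context
 * if cpu is not -1, otherwise the context of the target task.
 */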
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	if (counter->destroy)
		counter->destroy(counter);

	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&ctx->mutex);
	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);

	mutex_unlock(&counter->mutex);
	mutex_unlock(&ctx->mutex);

	free_counter(counter);
	put_context(ctx);

	return 0;
}

/*
 * Read the performance counter - simple non-blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[3];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	mutex_lock(&counter->mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	mutex_unlock(&counter->mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->wakeup, 0);
	else
		events = POLL_HUP;
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	int err = 0;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		perf_counter_enable_family(counter);
		break;
	case PERF_COUNTER_IOC_DISABLE:
		perf_counter_disable_family(counter);
		break;
	case PERF_COUNTER_IOC_REFRESH:
		perf_counter_refresh(counter, arg);
		break;
	default:
		err = -ENOTTY;
	}
	return err;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_mmap_data *data;
	struct perf_counter_mmap_page *userpg;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

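/*
 * Allocate the mmap data structure, the user control page and the
 * requested number of data pages.
 */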
static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data = container_of(rcu_head,
			struct perf_mmap_data, rcu_head);
	int i;

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
				      &counter->mmap_mutex)) {
		vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};

1461static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1462{
1463 struct perf_counter *counter = file->private_data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001464 unsigned long vma_size;
1465 unsigned long nr_pages;
1466 unsigned long locked, lock_limit;
1467 int ret = 0;
Paul Mackerras37d81822009-03-23 18:22:08 +01001468
1469 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1470 return -EINVAL;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001471
1472 vma_size = vma->vm_end - vma->vm_start;
1473 nr_pages = (vma_size / PAGE_SIZE) - 1;
1474
Peter Zijlstra7730d862009-03-25 12:48:31 +01001475 /*
1476 * If we have data pages, ensure their count is a power of two, so we
1477 * can use bitmasking instead of modulo.
1478 */
1479 if (nr_pages != 0 && !is_power_of_2(nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01001480 return -EINVAL;
1481
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001482 if (vma_size != PAGE_SIZE * (1 + nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01001483 return -EINVAL;
1484
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001485 if (vma->vm_pgoff != 0)
1486 return -EINVAL;
Paul Mackerras37d81822009-03-23 18:22:08 +01001487
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001488 mutex_lock(&counter->mmap_mutex);
1489 if (atomic_inc_not_zero(&counter->mmap_count)) {
1490 if (nr_pages != counter->data->nr_pages)
1491 ret = -EINVAL;
1492 goto unlock;
1493 }
1494
1495 locked = vma->vm_mm->locked_vm;
1496 locked += nr_pages + 1;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001497
1498 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1499 lock_limit >>= PAGE_SHIFT;
1500
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001501 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1502 ret = -EPERM;
1503 goto unlock;
1504 }
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001505
1506 WARN_ON(counter->data);
1507 ret = perf_mmap_data_alloc(counter, nr_pages);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001508 if (ret)
1509 goto unlock;
1510
1511 atomic_set(&counter->mmap_count, 1);
1512 vma->vm_mm->locked_vm += nr_pages + 1;
1513unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001514 mutex_unlock(&counter->mmap_mutex);
Paul Mackerras37d81822009-03-23 18:22:08 +01001515
1516 vma->vm_flags &= ~VM_MAYWRITE;
1517 vma->vm_flags |= VM_RESERVED;
1518 vma->vm_ops = &perf_mmap_vmops;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001519
1520 return ret;
Paul Mackerras37d81822009-03-23 18:22:08 +01001521}
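
/*
 * Illustrative user-space sketch (not part of this file): how a mapping
 * that satisfies the checks in perf_mmap() above is created.  It must be
 * MAP_SHARED and read-only, and cover exactly 1 + 2^n pages: the control
 * page (struct perf_counter_mmap_page) followed by a power-of-two number
 * of data pages.  counter_fd is assumed to come from
 * sys_perf_counter_open(); everything else is ordinary libc.
 *
 *	int nr_data_pages = 8;			   (must be a power of two)
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	size_t len = (1 + nr_data_pages) * page_size;
 *
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, counter_fd, 0);
 *	if (base == MAP_FAILED)
 *		err(1, "mmap");
 *
 *	struct perf_counter_mmap_page *control = base;
 *	unsigned char *data = (unsigned char *)base + page_size;
 */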
1522
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02001523static int perf_fasync(int fd, struct file *filp, int on)
1524{
1525 struct perf_counter *counter = filp->private_data;
1526 struct inode *inode = filp->f_path.dentry->d_inode;
1527 int retval;
1528
1529 mutex_lock(&inode->i_mutex);
1530 retval = fasync_helper(fd, filp, on, &counter->fasync);
1531 mutex_unlock(&inode->i_mutex);
1532
1533 if (retval < 0)
1534 return retval;
1535
1536 return 0;
1537}
1538
Thomas Gleixner0793a612008-12-04 20:12:29 +01001539static const struct file_operations perf_fops = {
1540 .release = perf_release,
1541 .read = perf_read,
1542 .poll = perf_poll,
Paul Mackerrasd859e292009-01-17 18:10:22 +11001543 .unlocked_ioctl = perf_ioctl,
1544 .compat_ioctl = perf_ioctl,
Paul Mackerras37d81822009-03-23 18:22:08 +01001545 .mmap = perf_mmap,
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02001546 .fasync = perf_fasync,
Thomas Gleixner0793a612008-12-04 20:12:29 +01001547};
1548
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001549/*
Peter Zijlstra925d5192009-03-30 19:07:02 +02001550 * Perf counter wakeup
1551 *
1552 * If there's data, ensure we set the poll() state and publish everything
1553 * to user-space before waking everybody up.
1554 */
1555
1556void perf_counter_wakeup(struct perf_counter *counter)
1557{
1558 struct perf_mmap_data *data;
1559
1560 rcu_read_lock();
1561 data = rcu_dereference(counter->data);
1562 if (data) {
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02001563 atomic_set(&data->wakeup, POLL_IN);
Peter Zijlstra38ff6672009-03-30 19:07:03 +02001564 /*
1565 * Ensure all data writes are issued before updating the
1566 * user-space data head information. The matching rmb()
1567 * will be in userspace after reading this value.
1568 */
1569 smp_wmb();
1570 data->user_page->data_head = atomic_read(&data->head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001571 }
1572 rcu_read_unlock();
1573
1574 wake_up_all(&counter->waitq);
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001575
1576 if (counter->pending_kill) {
1577 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1578 counter->pending_kill = 0;
1579 }
Peter Zijlstra925d5192009-03-30 19:07:02 +02001580}
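
/*
 * Sketch of the matching consumer side (an assumption about user-space,
 * not something defined in this file): the reader picks up data_head from
 * the mapped control page, issues the read barrier that pairs with the
 * smp_wmb() above, and only then parses records up to that head.  Here
 * "control", "data", "data_size", "tail" and "consume()" are made-up
 * names, and a record that wraps at the end of the buffer is assumed to
 * be handled by consume().
 *
 *	unsigned int head = control->data_head;
 *	rmb();				   (pairs with the smp_wmb() above)
 *
 *	while (tail != head) {
 *		struct perf_event_header *hdr =
 *			(void *)(data + (tail & (data_size - 1)));
 *		consume(hdr);
 *		tail += hdr->size;
 *	}
 */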
1581
1582/*
1583 * Pending wakeups
1584 *
1585 * Handle the case where we need to wake up from NMI (or rq->lock) context.
1586 *
1587 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1588 * singly linked list and use cmpxchg() to add entries locklessly.
1589 */
1590
Peter Zijlstra79f14642009-04-06 11:45:07 +02001591static void perf_pending_counter(struct perf_pending_entry *entry)
1592{
1593 struct perf_counter *counter = container_of(entry,
1594 struct perf_counter, pending);
1595
1596 if (counter->pending_disable) {
1597 counter->pending_disable = 0;
1598 perf_counter_disable(counter);
1599 }
1600
1601 if (counter->pending_wakeup) {
1602 counter->pending_wakeup = 0;
1603 perf_counter_wakeup(counter);
1604 }
1605}
1606
Peter Zijlstra671dec52009-04-06 11:45:02 +02001607#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02001608
Peter Zijlstra671dec52009-04-06 11:45:02 +02001609static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
Peter Zijlstra925d5192009-03-30 19:07:02 +02001610 PENDING_TAIL,
1611};
1612
Peter Zijlstra671dec52009-04-06 11:45:02 +02001613static void perf_pending_queue(struct perf_pending_entry *entry,
1614 void (*func)(struct perf_pending_entry *))
Peter Zijlstra925d5192009-03-30 19:07:02 +02001615{
Peter Zijlstra671dec52009-04-06 11:45:02 +02001616 struct perf_pending_entry **head;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001617
Peter Zijlstra671dec52009-04-06 11:45:02 +02001618 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02001619 return;
1620
Peter Zijlstra671dec52009-04-06 11:45:02 +02001621 entry->func = func;
1622
1623 head = &get_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001624
1625 do {
Peter Zijlstra671dec52009-04-06 11:45:02 +02001626 entry->next = *head;
1627 } while (cmpxchg(head, entry->next, entry) != entry->next);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001628
1629 set_perf_counter_pending();
1630
Peter Zijlstra671dec52009-04-06 11:45:02 +02001631 put_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001632}
1633
1634static int __perf_pending_run(void)
1635{
Peter Zijlstra671dec52009-04-06 11:45:02 +02001636 struct perf_pending_entry *list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001637 int nr = 0;
1638
Peter Zijlstra671dec52009-04-06 11:45:02 +02001639 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001640 while (list != PENDING_TAIL) {
Peter Zijlstra671dec52009-04-06 11:45:02 +02001641 void (*func)(struct perf_pending_entry *);
1642 struct perf_pending_entry *entry = list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001643
1644 list = list->next;
1645
Peter Zijlstra671dec52009-04-06 11:45:02 +02001646 func = entry->func;
1647 entry->next = NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001648 /*
1649 * Ensure we observe the unqueue before we issue the wakeup,
1650 * so that we won't be waiting forever.
1651 * -- see perf_not_pending().
1652 */
1653 smp_wmb();
1654
Peter Zijlstra671dec52009-04-06 11:45:02 +02001655 func(entry);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001656 nr++;
1657 }
1658
1659 return nr;
1660}
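
/*
 * The two routines above form a per-cpu, lock-free LIFO: an entry whose
 * ->next is NULL is not queued, PENDING_TAIL terminates the list, and
 * everything is published with cmpxchg()/xchg(), which is what makes it
 * safe from NMI context.  Distilled to its core (same primitives,
 * illustrative pseudo-names only):
 *
 *	push(entry):
 *		if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
 *			return;			   (already queued)
 *		do {
 *			entry->next = *head;
 *		} while (cmpxchg(head, entry->next, entry) != entry->next);
 *
 *	drain():
 *		list = xchg(head, PENDING_TAIL);   (grab the whole list)
 *		walk list until PENDING_TAIL, clearing ->next as we go
 */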
1661
1662static inline int perf_not_pending(struct perf_counter *counter)
1663{
1664 /*
1665 * If we flush on whatever cpu we run, there is a chance we don't
1666 * need to wait.
1667 */
1668 get_cpu();
1669 __perf_pending_run();
1670 put_cpu();
1671
1672 /*
1673 * Ensure we see the proper queue state before going to sleep
1674 * so that we do not miss the wakeup. -- see __perf_pending_run()
1675 */
1676 smp_rmb();
Peter Zijlstra671dec52009-04-06 11:45:02 +02001677 return counter->pending.next == NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001678}
1679
1680static void perf_pending_sync(struct perf_counter *counter)
1681{
1682 wait_event(counter->waitq, perf_not_pending(counter));
1683}
1684
1685void perf_counter_do_pending(void)
1686{
1687 __perf_pending_run();
1688}
1689
1690/*
Peter Zijlstra394ee072009-03-30 19:07:14 +02001691 * Callchain support -- arch specific
1692 */
1693
Peter Zijlstra9c03d882009-04-06 11:45:00 +02001694__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
Peter Zijlstra394ee072009-03-30 19:07:14 +02001695{
1696 return NULL;
1697}
1698
1699/*
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001700 * Output
1701 */
1702
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001703struct perf_output_handle {
1704 struct perf_counter *counter;
1705 struct perf_mmap_data *data;
1706 unsigned int offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01001707 unsigned int head;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001708 int wakeup;
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001709 int nmi;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001710 int overflow;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001711};
1712
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001713static inline void __perf_output_wakeup(struct perf_output_handle *handle)
1714{
Peter Zijlstra671dec52009-04-06 11:45:02 +02001715 if (handle->nmi) {
Peter Zijlstra79f14642009-04-06 11:45:07 +02001716 handle->counter->pending_wakeup = 1;
Peter Zijlstra671dec52009-04-06 11:45:02 +02001717 perf_pending_queue(&handle->counter->pending,
Peter Zijlstra79f14642009-04-06 11:45:07 +02001718 perf_pending_counter);
Peter Zijlstra671dec52009-04-06 11:45:02 +02001719 } else
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001720 perf_counter_wakeup(handle->counter);
1721}
1722
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001723static int perf_output_begin(struct perf_output_handle *handle,
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001724 struct perf_counter *counter, unsigned int size,
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001725 int nmi, int overflow)
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001726{
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001727 struct perf_mmap_data *data;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001728 unsigned int offset, head;
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001729
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001730 rcu_read_lock();
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001731 data = rcu_dereference(counter->data);
1732 if (!data)
1733 goto out;
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001734
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001735 handle->counter = counter;
1736 handle->nmi = nmi;
1737 handle->overflow = overflow;
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001738
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001739 if (!data->nr_pages)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001740 goto fail;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001741
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001742 do {
1743 offset = head = atomic_read(&data->head);
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001744 head += size;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001745 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1746
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001747 handle->data = data;
1748 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01001749 handle->head = head;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001750 handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001751
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001752 return 0;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001753
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001754fail:
1755 __perf_output_wakeup(handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001756out:
1757 rcu_read_unlock();
1758
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001759 return -ENOSPC;
1760}
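
/*
 * Worked example of the space reservation above (illustrative numbers):
 * two contexts writing to the same buffer each loop on
 * atomic_cmpxchg(&data->head, ...).  If A wants 32 bytes and B wants 16
 * and both read head == 4096, only one cmpxchg succeeds; the loser
 * rereads head and retries, so A ends up owning [4096, 4128) and B
 * [4128, 4144) (or the other way round), with no overlap and no lock
 * taken, which is why this can be used from NMI context.
 */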
1761
1762static void perf_output_copy(struct perf_output_handle *handle,
1763 void *buf, unsigned int len)
1764{
1765 unsigned int pages_mask;
1766 unsigned int offset;
1767 unsigned int size;
1768 void **pages;
1769
1770 offset = handle->offset;
1771 pages_mask = handle->data->nr_pages - 1;
1772 pages = handle->data->data_pages;
1773
1774 do {
1775 unsigned int page_offset;
1776 int nr;
1777
1778 nr = (offset >> PAGE_SHIFT) & pages_mask;
1779 page_offset = offset & (PAGE_SIZE - 1);
1780 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1781
1782 memcpy(pages[nr] + page_offset, buf, size);
1783
1784 len -= size;
1785 buf += size;
1786 offset += size;
1787 } while (len);
1788
1789 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01001790
1791 WARN_ON_ONCE(handle->offset > handle->head);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001792}
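
/*
 * Worked example of the indexing above (illustrative numbers): with four
 * data pages (pages_mask = 3) and PAGE_SIZE = 4096, offset 13000 gives
 *
 *	nr          = (13000 >> 12) & 3  = 3	   (fourth data page)
 *	page_offset = 13000 & 4095       = 712	   (712 bytes into it)
 *
 * and an offset past 16384 wraps back to page 0.  This is why perf_mmap()
 * insists on a power-of-two number of data pages: the '&' with pages_mask
 * replaces a modulo in this hot path.
 */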
1793
Peter Zijlstra5c148192009-03-25 12:30:23 +01001794#define perf_output_put(handle, x) \
1795 perf_output_copy((handle), &(x), sizeof(x))
1796
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001797static void perf_output_end(struct perf_output_handle *handle)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001798{
Peter Zijlstrac4578102009-04-02 11:12:01 +02001799 int wakeup_events = handle->counter->hw_event.wakeup_events;
1800
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001801 if (handle->overflow && wakeup_events) {
Peter Zijlstrac4578102009-04-02 11:12:01 +02001802 int events = atomic_inc_return(&handle->data->events);
1803 if (events >= wakeup_events) {
1804 atomic_sub(wakeup_events, &handle->data->events);
1805 __perf_output_wakeup(handle);
1806 }
1807 } else if (handle->wakeup)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001808 __perf_output_wakeup(handle);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001809 rcu_read_unlock();
1810}
1811
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02001812static void perf_counter_output(struct perf_counter *counter,
1813 int nmi, struct pt_regs *regs)
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001814{
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001815 int ret;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001816 u64 record_type = counter->hw_event.record_type;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001817 struct perf_output_handle handle;
1818 struct perf_event_header header;
1819 u64 ip;
Peter Zijlstra5c148192009-03-25 12:30:23 +01001820 struct {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001821 u32 pid, tid;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001822 } tid_entry;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001823 struct {
1824 u64 event;
1825 u64 counter;
1826 } group_entry;
Peter Zijlstra394ee072009-03-30 19:07:14 +02001827 struct perf_callchain_entry *callchain = NULL;
1828 int callchain_size = 0;
Peter Zijlstra339f7c92009-04-06 11:45:06 +02001829 u64 time;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001830
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001831 header.type = 0;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001832 header.size = sizeof(header);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001833
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001834 header.misc = PERF_EVENT_MISC_OVERFLOW;
1835 header.misc |= user_mode(regs) ?
Peter Zijlstra6fab0192009-04-08 15:01:26 +02001836 PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
1837
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001838 if (record_type & PERF_RECORD_IP) {
1839 ip = instruction_pointer(regs);
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001840 header.type |= PERF_RECORD_IP;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001841 header.size += sizeof(ip);
1842 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001843
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001844 if (record_type & PERF_RECORD_TID) {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001845 /* namespace issues */
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001846 tid_entry.pid = current->group_leader->pid;
1847 tid_entry.tid = current->pid;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001848
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001849 header.type |= PERF_RECORD_TID;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001850 header.size += sizeof(tid_entry);
1851 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001852
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001853 if (record_type & PERF_RECORD_GROUP) {
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001854 header.type |= PERF_RECORD_GROUP;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001855 header.size += sizeof(u64) +
1856 counter->nr_siblings * sizeof(group_entry);
1857 }
1858
1859 if (record_type & PERF_RECORD_CALLCHAIN) {
Peter Zijlstra394ee072009-03-30 19:07:14 +02001860 callchain = perf_callchain(regs);
1861
1862 if (callchain) {
Peter Zijlstra9c03d882009-04-06 11:45:00 +02001863 callchain_size = (1 + callchain->nr) * sizeof(u64);
Peter Zijlstra394ee072009-03-30 19:07:14 +02001864
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001865 header.type |= PERF_RECORD_CALLCHAIN;
Peter Zijlstra394ee072009-03-30 19:07:14 +02001866 header.size += callchain_size;
1867 }
1868 }
1869
Peter Zijlstra339f7c92009-04-06 11:45:06 +02001870 if (record_type & PERF_RECORD_TIME) {
1871 /*
1872 * Maybe do better on x86 and provide cpu_clock_nmi()
1873 */
1874 time = sched_clock();
1875
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001876 header.type |= PERF_RECORD_TIME;
Peter Zijlstra339f7c92009-04-06 11:45:06 +02001877 header.size += sizeof(u64);
1878 }
1879
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001880 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001881 if (ret)
1882 return;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001883
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001884 perf_output_put(&handle, header);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001885
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001886 if (record_type & PERF_RECORD_IP)
1887 perf_output_put(&handle, ip);
1888
1889 if (record_type & PERF_RECORD_TID)
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001890 perf_output_put(&handle, tid_entry);
1891
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001892 if (record_type & PERF_RECORD_GROUP) {
1893 struct perf_counter *leader, *sub;
1894 u64 nr = counter->nr_siblings;
1895
1896 perf_output_put(&handle, nr);
1897
1898 leader = counter->group_leader;
1899 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1900 if (sub != counter)
1901 sub->hw_ops->read(sub);
1902
1903 group_entry.event = sub->hw_event.config;
1904 group_entry.counter = atomic64_read(&sub->count);
1905
1906 perf_output_put(&handle, group_entry);
1907 }
1908 }
1909
Peter Zijlstra394ee072009-03-30 19:07:14 +02001910 if (callchain)
1911 perf_output_copy(&handle, callchain, callchain_size);
1912
Peter Zijlstra339f7c92009-04-06 11:45:06 +02001913 if (record_type & PERF_RECORD_TIME)
1914 perf_output_put(&handle, time);
1915
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001916 perf_output_end(&handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001917}
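
/*
 * Sketch of how a consumer might decode one overflow record emitted above
 * (the reader-side code is an assumption, not kernel API).  The optional
 * fields follow the header in the same order they are appended above --
 * IP, TID, GROUP, CALLCHAIN, TIME -- each present only when its
 * PERF_RECORD_* bit is set in header.type:
 *
 *	u64 ip, pid_tid, time, nr;
 *	u64 *p = (u64 *)(hdr + 1);
 *
 *	if (hdr->type & PERF_RECORD_IP)
 *		ip = *p++;
 *	if (hdr->type & PERF_RECORD_TID)
 *		pid_tid = *p++;			   (u32 pid, u32 tid packed)
 *	if (hdr->type & PERF_RECORD_GROUP) {
 *		nr = *p++;
 *		p += 2 * nr;			   (one {event, count} pair each)
 *	}
 *	if (hdr->type & PERF_RECORD_CALLCHAIN) {
 *		nr = *p++;			   (see callchain_size above)
 *		p += nr;
 *	}
 *	if (hdr->type & PERF_RECORD_TIME)
 *		time = *p++;
 */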
1918
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001919/*
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02001920 * mmap tracking
1921 */
1922
1923struct perf_mmap_event {
1924 struct file *file;
1925 char *file_name;
1926 int file_size;
1927
1928 struct {
1929 struct perf_event_header header;
1930
1931 u32 pid;
1932 u32 tid;
1933 u64 start;
1934 u64 len;
1935 u64 pgoff;
1936 } event;
1937};
1938
1939static void perf_counter_mmap_output(struct perf_counter *counter,
1940 struct perf_mmap_event *mmap_event)
1941{
1942 struct perf_output_handle handle;
1943 int size = mmap_event->event.header.size;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001944 int ret = perf_output_begin(&handle, counter, size, 0, 0);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02001945
1946 if (ret)
1947 return;
1948
1949 perf_output_put(&handle, mmap_event->event);
1950 perf_output_copy(&handle, mmap_event->file_name,
1951 mmap_event->file_size);
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001952 perf_output_end(&handle);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02001953}
1954
1955static int perf_counter_mmap_match(struct perf_counter *counter,
1956 struct perf_mmap_event *mmap_event)
1957{
1958 if (counter->hw_event.mmap &&
1959 mmap_event->event.header.type == PERF_EVENT_MMAP)
1960 return 1;
1961
1962 if (counter->hw_event.munmap &&
1963 mmap_event->event.header.type == PERF_EVENT_MUNMAP)
1964 return 1;
1965
1966 return 0;
1967}
1968
1969static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
1970 struct perf_mmap_event *mmap_event)
1971{
1972 struct perf_counter *counter;
1973
1974 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
1975 return;
1976
1977 rcu_read_lock();
1978 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
1979 if (perf_counter_mmap_match(counter, mmap_event))
1980 perf_counter_mmap_output(counter, mmap_event);
1981 }
1982 rcu_read_unlock();
1983}
1984
1985static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
1986{
1987 struct perf_cpu_context *cpuctx;
1988 struct file *file = mmap_event->file;
1989 unsigned int size;
1990 char tmp[16];
1991 char *buf = NULL;
1992 char *name;
1993
1994 if (file) {
1995 buf = kzalloc(PATH_MAX, GFP_KERNEL);
1996 if (!buf) {
1997 name = strncpy(tmp, "//enomem", sizeof(tmp));
1998 goto got_name;
1999 }
2000 name = dentry_path(file->f_dentry, buf, PATH_MAX);
2001 if (IS_ERR(name)) {
2002 name = strncpy(tmp, "//toolong", sizeof(tmp));
2003 goto got_name;
2004 }
2005 } else {
2006 name = strncpy(tmp, "//anon", sizeof(tmp));
2007 goto got_name;
2008 }
2009
2010got_name:
2011 size = ALIGN(strlen(name), sizeof(u64));
2012
2013 mmap_event->file_name = name;
2014 mmap_event->file_size = size;
2015
2016 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2017
2018 cpuctx = &get_cpu_var(perf_cpu_context);
2019 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2020 put_cpu_var(perf_cpu_context);
2021
2022 perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
2023
2024 kfree(buf);
2025}
2026
2027void perf_counter_mmap(unsigned long addr, unsigned long len,
2028 unsigned long pgoff, struct file *file)
2029{
2030 struct perf_mmap_event mmap_event = {
2031 .file = file,
2032 .event = {
2033 .header = { .type = PERF_EVENT_MMAP, },
2034 .pid = current->group_leader->pid,
2035 .tid = current->pid,
2036 .start = addr,
2037 .len = len,
2038 .pgoff = pgoff,
2039 },
2040 };
2041
2042 perf_counter_mmap_event(&mmap_event);
2043}
2044
2045void perf_counter_munmap(unsigned long addr, unsigned long len,
2046 unsigned long pgoff, struct file *file)
2047{
2048 struct perf_mmap_event mmap_event = {
2049 .file = file,
2050 .event = {
2051 .header = { .type = PERF_EVENT_MUNMAP, },
2052 .pid = current->group_leader->pid,
2053 .tid = current->pid,
2054 .start = addr,
2055 .len = len,
2056 .pgoff = pgoff,
2057 },
2058 };
2059
2060 perf_counter_mmap_event(&mmap_event);
2061}
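
/*
 * The records emitted by the two helpers above have a fixed layout: the
 * event struct built in perf_counter_mmap()/perf_counter_munmap(),
 * followed by the pathname padded to a u64 boundary (header.size covers
 * both).  A consumer-side mirror might look like this (hypothetical,
 * not a kernel definition):
 *
 *	struct mmap_record {
 *		struct perf_event_header header;   (PERF_EVENT_MMAP or _MUNMAP)
 *		u32 pid, tid;
 *		u64 start, len, pgoff;
 *		char filename[];    (ALIGN(strlen(name), sizeof(u64)) bytes)
 *	};
 */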
2062
2063/*
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002064 * Generic counter overflow handling.
2065 */
2066
2067int perf_counter_overflow(struct perf_counter *counter,
2068 int nmi, struct pt_regs *regs)
2069{
Peter Zijlstra79f14642009-04-06 11:45:07 +02002070 int events = atomic_read(&counter->event_limit);
2071 int ret = 0;
2072
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002073 counter->pending_kill = POLL_IN;
Peter Zijlstra79f14642009-04-06 11:45:07 +02002074 if (events && atomic_dec_and_test(&counter->event_limit)) {
2075 ret = 1;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002076 counter->pending_kill = POLL_HUP;
Peter Zijlstra79f14642009-04-06 11:45:07 +02002077 if (nmi) {
2078 counter->pending_disable = 1;
2079 perf_pending_queue(&counter->pending,
2080 perf_pending_counter);
2081 } else
2082 perf_counter_disable(counter);
2083 }
2084
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002085 perf_counter_output(counter, nmi, regs);
Peter Zijlstra79f14642009-04-06 11:45:07 +02002086 return ret;
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002087}
2088
2089/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002090 * Generic software counter infrastructure
2091 */
2092
2093static void perf_swcounter_update(struct perf_counter *counter)
2094{
2095 struct hw_perf_counter *hwc = &counter->hw;
2096 u64 prev, now;
2097 s64 delta;
2098
2099again:
2100 prev = atomic64_read(&hwc->prev_count);
2101 now = atomic64_read(&hwc->count);
2102 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2103 goto again;
2104
2105 delta = now - prev;
2106
2107 atomic64_add(delta, &counter->count);
2108 atomic64_sub(delta, &hwc->period_left);
2109}
2110
2111static void perf_swcounter_set_period(struct perf_counter *counter)
2112{
2113 struct hw_perf_counter *hwc = &counter->hw;
2114 s64 left = atomic64_read(&hwc->period_left);
2115 s64 period = hwc->irq_period;
2116
2117 if (unlikely(left <= -period)) {
2118 left = period;
2119 atomic64_set(&hwc->period_left, left);
2120 }
2121
2122 if (unlikely(left <= 0)) {
2123 left += period;
2124 atomic64_add(period, &hwc->period_left);
2125 }
2126
2127 atomic64_set(&hwc->prev_count, -left);
2128 atomic64_set(&hwc->count, -left);
2129}
2130
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002131static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2132{
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002133 enum hrtimer_restart ret = HRTIMER_RESTART;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002134 struct perf_counter *counter;
2135 struct pt_regs *regs;
2136
2137 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
2138 counter->hw_ops->read(counter);
2139
2140 regs = get_irq_regs();
2141 /*
2142 * In case we exclude kernel IPs or are somehow not in interrupt
2143 * context, provide the next best thing, the user IP.
2144 */
2145 if ((counter->hw_event.exclude_kernel || !regs) &&
2146 !counter->hw_event.exclude_user)
2147 regs = task_pt_regs(current);
2148
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002149 if (regs) {
2150 if (perf_counter_overflow(counter, 0, regs))
2151 ret = HRTIMER_NORESTART;
2152 }
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002153
2154 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
2155
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002156 return ret;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002157}
2158
2159static void perf_swcounter_overflow(struct perf_counter *counter,
2160 int nmi, struct pt_regs *regs)
2161{
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002162 perf_swcounter_update(counter);
2163 perf_swcounter_set_period(counter);
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002164 if (perf_counter_overflow(counter, nmi, regs))
2165 /* soft-disable the counter */
2166 ;
2167
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002168}
2169
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002170static int perf_swcounter_match(struct perf_counter *counter,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002171 enum perf_event_types type,
2172 u32 event, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002173{
2174 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2175 return 0;
2176
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002177 if (perf_event_raw(&counter->hw_event))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002178 return 0;
2179
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002180 if (perf_event_type(&counter->hw_event) != type)
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002181 return 0;
2182
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002183 if (perf_event_id(&counter->hw_event) != event)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002184 return 0;
2185
2186 if (counter->hw_event.exclude_user && user_mode(regs))
2187 return 0;
2188
2189 if (counter->hw_event.exclude_kernel && !user_mode(regs))
2190 return 0;
2191
2192 return 1;
2193}
2194
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002195static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2196 int nmi, struct pt_regs *regs)
2197{
2198 int neg = atomic64_add_negative(nr, &counter->hw.count);
2199 if (counter->hw.irq_period && !neg)
2200 perf_swcounter_overflow(counter, nmi, regs);
2201}
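
/*
 * Worked example of the period logic above (illustrative numbers): with
 * irq_period = 1000, perf_swcounter_set_period() biases both prev_count
 * and count to -1000.  Each perf_swcounter_add() with nr = 1 then
 * increments count; the 1000th increment moves it from -1 to 0,
 * atomic64_add_negative() returns false, and perf_swcounter_overflow()
 * reports the event and re-biases the counter.  That gives roughly one
 * report per irq_period occurrences without any explicit comparison in
 * the common case.
 */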
2202
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002203static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002204 enum perf_event_types type, u32 event,
2205 u64 nr, int nmi, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002206{
2207 struct perf_counter *counter;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002208
Peter Zijlstra01ef09d2009-03-19 20:26:11 +01002209 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002210 return;
2211
Peter Zijlstra592903c2009-03-13 12:21:36 +01002212 rcu_read_lock();
2213 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002214 if (perf_swcounter_match(counter, type, event, regs))
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002215 perf_swcounter_add(counter, nr, nmi, regs);
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002216 }
Peter Zijlstra592903c2009-03-13 12:21:36 +01002217 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002218}
2219
Peter Zijlstra96f6d442009-03-23 18:22:07 +01002220static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2221{
2222 if (in_nmi())
2223 return &cpuctx->recursion[3];
2224
2225 if (in_irq())
2226 return &cpuctx->recursion[2];
2227
2228 if (in_softirq())
2229 return &cpuctx->recursion[1];
2230
2231 return &cpuctx->recursion[0];
2232}
2233
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002234static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2235 u64 nr, int nmi, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002236{
2237 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra96f6d442009-03-23 18:22:07 +01002238 int *recursion = perf_swcounter_recursion_context(cpuctx);
2239
2240 if (*recursion)
2241 goto out;
2242
2243 (*recursion)++;
2244 barrier();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002245
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002246 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
2247 if (cpuctx->task_ctx) {
2248 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
2249 nr, nmi, regs);
2250 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002251
Peter Zijlstra96f6d442009-03-23 18:22:07 +01002252 barrier();
2253 (*recursion)--;
2254
2255out:
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002256 put_cpu_var(perf_cpu_context);
2257}
2258
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002259void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
2260{
2261 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
2262}
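
/*
 * Illustrative call site (an assumption -- the real callers live
 * elsewhere, e.g. in the fault and scheduler paths): generic kernel code
 * reports one occurrence of a software event like this:
 *
 *	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 0, regs);
 *
 * nr is the amount to add, nmi selects the deferred-wakeup path, and
 * regs supplies the interrupted context used by perf_swcounter_match()
 * for exclude_user/exclude_kernel filtering.
 */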
2263
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002264static void perf_swcounter_read(struct perf_counter *counter)
2265{
2266 perf_swcounter_update(counter);
2267}
2268
2269static int perf_swcounter_enable(struct perf_counter *counter)
2270{
2271 perf_swcounter_set_period(counter);
2272 return 0;
2273}
2274
2275static void perf_swcounter_disable(struct perf_counter *counter)
2276{
2277 perf_swcounter_update(counter);
2278}
2279
Peter Zijlstraac17dc82009-03-13 12:21:34 +01002280static const struct hw_perf_counter_ops perf_ops_generic = {
2281 .enable = perf_swcounter_enable,
2282 .disable = perf_swcounter_disable,
2283 .read = perf_swcounter_read,
2284};
2285
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002286/*
2287 * Software counter: cpu wall time clock
2288 */
2289
Paul Mackerras9abf8a02009-01-09 16:26:43 +11002290static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2291{
2292 int cpu = raw_smp_processor_id();
2293 s64 prev;
2294 u64 now;
2295
2296 now = cpu_clock(cpu);
2297 prev = atomic64_read(&counter->hw.prev_count);
2298 atomic64_set(&counter->hw.prev_count, now);
2299 atomic64_add(now - prev, &counter->count);
2300}
2301
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002302static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2303{
2304 struct hw_perf_counter *hwc = &counter->hw;
2305 int cpu = raw_smp_processor_id();
2306
2307 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01002308 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2309 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002310 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002311 __hrtimer_start_range_ns(&hwc->hrtimer,
2312 ns_to_ktime(hwc->irq_period), 0,
2313 HRTIMER_MODE_REL, 0);
2314 }
2315
2316 return 0;
2317}
2318
Ingo Molnar5c92d122008-12-11 13:21:10 +01002319static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2320{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002321 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11002322 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01002323}
2324
2325static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2326{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11002327 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01002328}
2329
2330static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01002331 .enable = cpu_clock_perf_counter_enable,
2332 .disable = cpu_clock_perf_counter_disable,
2333 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01002334};
2335
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01002336/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002337 * Software counter: task time clock
2338 */
2339
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002340static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
Ingo Molnarbae43c92008-12-11 14:03:20 +01002341{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002342 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002343 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01002344
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02002345 prev = atomic64_xchg(&counter->hw.prev_count, now);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002346 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002347 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01002348}
2349
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002350static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002351{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002352 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02002353 u64 now;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002354
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02002355 now = counter->ctx->time;
2356
2357 atomic64_set(&hwc->prev_count, now);
Peter Zijlstra039fc912009-03-13 16:43:47 +01002358 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2359 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002360 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002361 __hrtimer_start_range_ns(&hwc->hrtimer,
2362 ns_to_ktime(hwc->irq_period), 0,
2363 HRTIMER_MODE_REL, 0);
2364 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002365
2366 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002367}
2368
2369static void task_clock_perf_counter_disable(struct perf_counter *counter)
2370{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002371 hrtimer_cancel(&counter->hw.hrtimer);
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002372 task_clock_perf_counter_update(counter, counter->ctx->time);
2373
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002374}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01002375
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002376static void task_clock_perf_counter_read(struct perf_counter *counter)
2377{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002378 u64 time;
2379
2380 if (!in_nmi()) {
2381 update_context_time(counter->ctx);
2382 time = counter->ctx->time;
2383 } else {
2384 u64 now = perf_clock();
2385 u64 delta = now - counter->ctx->timestamp;
2386 time = counter->ctx->time + delta;
2387 }
2388
2389 task_clock_perf_counter_update(counter, time);
Ingo Molnarbae43c92008-12-11 14:03:20 +01002390}
2391
2392static const struct hw_perf_counter_ops perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01002393 .enable = task_clock_perf_counter_enable,
2394 .disable = task_clock_perf_counter_disable,
2395 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01002396};
2397
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002398/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002399 * Software counter: cpu migrations
2400 */
2401
Paul Mackerras23a185c2009-02-09 22:42:47 +11002402static inline u64 get_cpu_migrations(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01002403{
Paul Mackerras23a185c2009-02-09 22:42:47 +11002404 struct task_struct *curr = counter->ctx->task;
2405
2406 if (curr)
2407 return curr->se.nr_migrations;
2408 return cpu_nr_migrations(smp_processor_id());
Ingo Molnar6c594c22008-12-14 12:34:15 +01002409}
2410
2411static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
2412{
2413 u64 prev, now;
2414 s64 delta;
2415
2416 prev = atomic64_read(&counter->hw.prev_count);
Paul Mackerras23a185c2009-02-09 22:42:47 +11002417 now = get_cpu_migrations(counter);
Ingo Molnar6c594c22008-12-14 12:34:15 +01002418
2419 atomic64_set(&counter->hw.prev_count, now);
2420
2421 delta = now - prev;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002422
2423 atomic64_add(delta, &counter->count);
2424}
2425
2426static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
2427{
2428 cpu_migrations_perf_counter_update(counter);
2429}
2430
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002431static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01002432{
Paul Mackerrasc07c99b2009-02-13 22:10:34 +11002433 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
2434 atomic64_set(&counter->hw.prev_count,
2435 get_cpu_migrations(counter));
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002436 return 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002437}
2438
2439static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
2440{
2441 cpu_migrations_perf_counter_update(counter);
2442}
2443
2444static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
Ingo Molnar76715812008-12-17 14:20:28 +01002445 .enable = cpu_migrations_perf_counter_enable,
2446 .disable = cpu_migrations_perf_counter_disable,
2447 .read = cpu_migrations_perf_counter_read,
Ingo Molnar6c594c22008-12-14 12:34:15 +01002448};
2449
Peter Zijlstrae077df42009-03-19 20:26:17 +01002450#ifdef CONFIG_EVENT_PROFILE
2451void perf_tpcounter_event(int event_id)
2452{
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002453 struct pt_regs *regs = get_irq_regs();
2454
2455 if (!regs)
2456 regs = task_pt_regs(current);
2457
2458 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
Peter Zijlstrae077df42009-03-19 20:26:17 +01002459}
2460
2461extern int ftrace_profile_enable(int);
2462extern void ftrace_profile_disable(int);
2463
2464static void tp_perf_counter_destroy(struct perf_counter *counter)
2465{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002466 ftrace_profile_disable(perf_event_id(&counter->hw_event));
Peter Zijlstrae077df42009-03-19 20:26:17 +01002467}
2468
2469static const struct hw_perf_counter_ops *
2470tp_perf_counter_init(struct perf_counter *counter)
2471{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002472 int event_id = perf_event_id(&counter->hw_event);
Peter Zijlstrae077df42009-03-19 20:26:17 +01002473 int ret;
2474
2475 ret = ftrace_profile_enable(event_id);
2476 if (ret)
2477 return NULL;
2478
2479 counter->destroy = tp_perf_counter_destroy;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002480 counter->hw.irq_period = counter->hw_event.irq_period;
Peter Zijlstrae077df42009-03-19 20:26:17 +01002481
2482 return &perf_ops_generic;
2483}
2484#else
2485static const struct hw_perf_counter_ops *
2486tp_perf_counter_init(struct perf_counter *counter)
2487{
2488 return NULL;
2489}
2490#endif
2491
Ingo Molnar5c92d122008-12-11 13:21:10 +01002492static const struct hw_perf_counter_ops *
2493sw_perf_counter_init(struct perf_counter *counter)
2494{
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002495 struct perf_counter_hw_event *hw_event = &counter->hw_event;
Ingo Molnar5c92d122008-12-11 13:21:10 +01002496 const struct hw_perf_counter_ops *hw_ops = NULL;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002497 struct hw_perf_counter *hwc = &counter->hw;
Ingo Molnar5c92d122008-12-11 13:21:10 +01002498
Paul Mackerras0475f9e2009-02-11 14:35:35 +11002499 /*
2500 * Software counters (currently) can't in general distinguish
2501 * between user, kernel and hypervisor events.
2502 * However, context switches and cpu migrations are considered
2503 * to be kernel events, and page faults are never hypervisor
2504 * events.
2505 */
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002506 switch (perf_event_id(&counter->hw_event)) {
Ingo Molnar5c92d122008-12-11 13:21:10 +01002507 case PERF_COUNT_CPU_CLOCK:
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002508 hw_ops = &perf_ops_cpu_clock;
2509
2510 if (hw_event->irq_period && hw_event->irq_period < 10000)
2511 hw_event->irq_period = 10000;
Ingo Molnar5c92d122008-12-11 13:21:10 +01002512 break;
Ingo Molnarbae43c92008-12-11 14:03:20 +01002513 case PERF_COUNT_TASK_CLOCK:
Paul Mackerras23a185c2009-02-09 22:42:47 +11002514 /*
2515 * If the user instantiates this as a per-cpu counter,
2516 * use the cpu_clock counter instead.
2517 */
2518 if (counter->ctx->task)
2519 hw_ops = &perf_ops_task_clock;
2520 else
2521 hw_ops = &perf_ops_cpu_clock;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002522
2523 if (hw_event->irq_period && hw_event->irq_period < 10000)
2524 hw_event->irq_period = 10000;
Ingo Molnarbae43c92008-12-11 14:03:20 +01002525 break;
Ingo Molnare06c61a2008-12-14 14:44:31 +01002526 case PERF_COUNT_PAGE_FAULTS:
Peter Zijlstraac17dc82009-03-13 12:21:34 +01002527 case PERF_COUNT_PAGE_FAULTS_MIN:
2528 case PERF_COUNT_PAGE_FAULTS_MAJ:
Ingo Molnar5d6a27d2008-12-14 12:28:33 +01002529 case PERF_COUNT_CONTEXT_SWITCHES:
Peter Zijlstra4a0deca2009-03-19 20:26:12 +01002530 hw_ops = &perf_ops_generic;
Ingo Molnar5d6a27d2008-12-14 12:28:33 +01002531 break;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002532 case PERF_COUNT_CPU_MIGRATIONS:
Paul Mackerras0475f9e2009-02-11 14:35:35 +11002533 if (!counter->hw_event.exclude_kernel)
2534 hw_ops = &perf_ops_cpu_migrations;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002535 break;
Ingo Molnar5c92d122008-12-11 13:21:10 +01002536 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002537
2538 if (hw_ops)
2539 hwc->irq_period = hw_event->irq_period;
2540
Ingo Molnar5c92d122008-12-11 13:21:10 +01002541 return hw_ops;
2542}
2543
Thomas Gleixner0793a612008-12-04 20:12:29 +01002544/*
2545 * Allocate and initialize a counter structure
2546 */
2547static struct perf_counter *
Ingo Molnar04289bb2008-12-11 08:38:42 +01002548perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2549 int cpu,
Paul Mackerras23a185c2009-02-09 22:42:47 +11002550 struct perf_counter_context *ctx,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002551 struct perf_counter *group_leader,
2552 gfp_t gfpflags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002553{
Ingo Molnar5c92d122008-12-11 13:21:10 +01002554 const struct hw_perf_counter_ops *hw_ops;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002555 struct perf_counter *counter;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002556 long err;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002557
Ingo Molnar9b51f662008-12-12 13:49:45 +01002558 counter = kzalloc(sizeof(*counter), gfpflags);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002559 if (!counter)
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002560 return ERR_PTR(-ENOMEM);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002561
Ingo Molnar04289bb2008-12-11 08:38:42 +01002562 /*
2563 * Single counters are their own group leaders, with an
2564 * empty sibling list:
2565 */
2566 if (!group_leader)
2567 group_leader = counter;
2568
Thomas Gleixner0793a612008-12-04 20:12:29 +01002569 mutex_init(&counter->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002570 INIT_LIST_HEAD(&counter->list_entry);
Peter Zijlstra592903c2009-03-13 12:21:36 +01002571 INIT_LIST_HEAD(&counter->event_entry);
Ingo Molnar04289bb2008-12-11 08:38:42 +01002572 INIT_LIST_HEAD(&counter->sibling_list);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002573 init_waitqueue_head(&counter->waitq);
2574
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002575 mutex_init(&counter->mmap_mutex);
2576
Paul Mackerrasd859e292009-01-17 18:10:22 +11002577 INIT_LIST_HEAD(&counter->child_list);
2578
Ingo Molnar9f66a382008-12-10 12:33:23 +01002579 counter->cpu = cpu;
2580 counter->hw_event = *hw_event;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002581 counter->group_leader = group_leader;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002582 counter->hw_ops = NULL;
Paul Mackerras23a185c2009-02-09 22:42:47 +11002583 counter->ctx = ctx;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002584
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002585 counter->state = PERF_COUNTER_STATE_INACTIVE;
Ingo Molnara86ed502008-12-17 00:43:10 +01002586 if (hw_event->disabled)
2587 counter->state = PERF_COUNTER_STATE_OFF;
2588
Ingo Molnar5c92d122008-12-11 13:21:10 +01002589 hw_ops = NULL;
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002590
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002591 if (perf_event_raw(hw_event)) {
Ingo Molnar5c92d122008-12-11 13:21:10 +01002592 hw_ops = hw_perf_counter_init(counter);
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002593 goto done;
2594 }
2595
2596 switch (perf_event_type(hw_event)) {
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002597 case PERF_TYPE_HARDWARE:
2598 hw_ops = hw_perf_counter_init(counter);
2599 break;
2600
2601 case PERF_TYPE_SOFTWARE:
2602 hw_ops = sw_perf_counter_init(counter);
2603 break;
2604
2605 case PERF_TYPE_TRACEPOINT:
2606 hw_ops = tp_perf_counter_init(counter);
2607 break;
2608 }
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002609done:
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002610 err = 0;
2611 if (!hw_ops)
2612 err = -EINVAL;
2613 else if (IS_ERR(hw_ops))
2614 err = PTR_ERR(hw_ops);
2615
2616 if (err) {
2617 kfree(counter);
2618 return ERR_PTR(err);
2619 }
2620
Ingo Molnar621a01e2008-12-11 12:46:46 +01002621 counter->hw_ops = hw_ops;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002622
2623 return counter;
2624}
2625
2626/**
Paul Mackerras2743a5b2009-03-04 20:36:51 +11002627 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
Ingo Molnar9f66a382008-12-10 12:33:23 +01002628 *
2629 * @hw_event_uptr: event type attributes for monitoring/sampling
Thomas Gleixner0793a612008-12-04 20:12:29 +01002630 * @pid: target pid
Ingo Molnar9f66a382008-12-10 12:33:23 +01002631 * @cpu: target cpu
2632 * @group_fd: group leader counter fd
Thomas Gleixner0793a612008-12-04 20:12:29 +01002633 */
Paul Mackerras2743a5b2009-03-04 20:36:51 +11002634SYSCALL_DEFINE5(perf_counter_open,
Paul Mackerrasf3dfd262009-02-26 22:43:46 +11002635 const struct perf_counter_hw_event __user *, hw_event_uptr,
Paul Mackerras2743a5b2009-03-04 20:36:51 +11002636 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002637{
Ingo Molnar04289bb2008-12-11 08:38:42 +01002638 struct perf_counter *counter, *group_leader;
Ingo Molnar9f66a382008-12-10 12:33:23 +01002639 struct perf_counter_hw_event hw_event;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002640 struct perf_counter_context *ctx;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002641 struct file *counter_file = NULL;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002642 struct file *group_file = NULL;
2643 int fput_needed = 0;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002644 int fput_needed2 = 0;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002645 int ret;
2646
Paul Mackerras2743a5b2009-03-04 20:36:51 +11002647 /* for future expandability... */
2648 if (flags)
2649 return -EINVAL;
2650
Ingo Molnar9f66a382008-12-10 12:33:23 +01002651 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
Thomas Gleixnereab656a2008-12-08 19:26:59 +01002652 return -EFAULT;
2653
Ingo Molnar04289bb2008-12-11 08:38:42 +01002654 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01002655 * Get the target context (task or percpu):
2656 */
2657 ctx = find_get_context(pid, cpu);
2658 if (IS_ERR(ctx))
2659 return PTR_ERR(ctx);
2660
2661 /*
2662 * Look up the group leader (we will attach this counter to it):
Ingo Molnar04289bb2008-12-11 08:38:42 +01002663 */
2664 group_leader = NULL;
2665 if (group_fd != -1) {
2666 ret = -EINVAL;
2667 group_file = fget_light(group_fd, &fput_needed);
2668 if (!group_file)
Ingo Molnarccff2862008-12-11 11:26:29 +01002669 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002670 if (group_file->f_op != &perf_fops)
Ingo Molnarccff2862008-12-11 11:26:29 +01002671 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002672
2673 group_leader = group_file->private_data;
2674 /*
Ingo Molnarccff2862008-12-11 11:26:29 +01002675 * Do not allow a recursive hierarchy (this new sibling
2676 * becoming part of another group-sibling):
Ingo Molnar04289bb2008-12-11 08:38:42 +01002677 */
Ingo Molnarccff2862008-12-11 11:26:29 +01002678 if (group_leader->group_leader != group_leader)
2679 goto err_put_context;
2680 /*
2681 * Do not allow to attach to a group in a different
2682 * task or CPU context:
2683 */
2684 if (group_leader->ctx != ctx)
2685 goto err_put_context;
Paul Mackerras3b6f9e52009-01-14 21:00:30 +11002686 /*
2687 * Only a group leader can be exclusive or pinned
2688 */
2689 if (hw_event.exclusive || hw_event.pinned)
2690 goto err_put_context;
Ingo Molnar04289bb2008-12-11 08:38:42 +01002691 }
2692
Paul Mackerras23a185c2009-02-09 22:42:47 +11002693 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2694 GFP_KERNEL);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002695 ret = PTR_ERR(counter);
2696 if (IS_ERR(counter))
Thomas Gleixner0793a612008-12-04 20:12:29 +01002697 goto err_put_context;
2698
Thomas Gleixner0793a612008-12-04 20:12:29 +01002699 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2700 if (ret < 0)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002701 goto err_free_put_context;
2702
2703 counter_file = fget_light(ret, &fput_needed2);
2704 if (!counter_file)
2705 goto err_free_put_context;
2706
2707 counter->filp = counter_file;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002708 mutex_lock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002709 perf_install_in_context(ctx, counter, cpu);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002710 mutex_unlock(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002711
2712 fput_light(counter_file, fput_needed2);
Thomas Gleixner0793a612008-12-04 20:12:29 +01002713
Ingo Molnar04289bb2008-12-11 08:38:42 +01002714out_fput:
2715 fput_light(group_file, fput_needed);
2716
Thomas Gleixner0793a612008-12-04 20:12:29 +01002717 return ret;
2718
Ingo Molnar9b51f662008-12-12 13:49:45 +01002719err_free_put_context:
Thomas Gleixner0793a612008-12-04 20:12:29 +01002720 kfree(counter);
2721
2722err_put_context:
2723 put_context(ctx);
2724
Ingo Molnar04289bb2008-12-11 08:38:42 +01002725 goto out_fput;
Thomas Gleixner0793a612008-12-04 20:12:29 +01002726}
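
/*
 * User-space sketch of invoking this system call (illustrative only:
 * __NR_perf_counter_open is architecture-specific and assumed to be
 * defined, and the config encoding is left elided):
 *
 *	struct perf_counter_hw_event hw_event;
 *	u64 count;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	hw_event.config = ...;	  (event selection, see <linux/perf_counter.h>)
 *
 *	int fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
 *	if (fd < 0)
 *		err(1, "perf_counter_open");
 *	read(fd, &count, sizeof(count));
 *
 * where the last four arguments are pid (0 selects the current task),
 * cpu (-1 for any cpu), the group-leader fd (-1 for a standalone
 * counter) and flags (which must currently be 0, as checked above);
 * with the default read_format, read() is expected to return the 64-bit
 * counter value.
 */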
2727
Ingo Molnar9b51f662008-12-12 13:49:45 +01002728/*
2729 * Initialize the perf_counter context in a task_struct:
2730 */
2731static void
2732__perf_counter_init_context(struct perf_counter_context *ctx,
2733 struct task_struct *task)
2734{
2735 memset(ctx, 0, sizeof(*ctx));
2736 spin_lock_init(&ctx->lock);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002737 mutex_init(&ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002738 INIT_LIST_HEAD(&ctx->counter_list);
Peter Zijlstra592903c2009-03-13 12:21:36 +01002739 INIT_LIST_HEAD(&ctx->event_list);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002740 ctx->task = task;
2741}
2742
2743/*
2744 * inherit a counter from parent task to child task:
2745 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002746static struct perf_counter *
Ingo Molnar9b51f662008-12-12 13:49:45 +01002747inherit_counter(struct perf_counter *parent_counter,
2748 struct task_struct *parent,
2749 struct perf_counter_context *parent_ctx,
2750 struct task_struct *child,
Paul Mackerrasd859e292009-01-17 18:10:22 +11002751 struct perf_counter *group_leader,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002752 struct perf_counter_context *child_ctx)
2753{
2754 struct perf_counter *child_counter;
2755
Paul Mackerrasd859e292009-01-17 18:10:22 +11002756 /*
2757 * Instead of creating recursive hierarchies of counters,
2758 * we link inherited counters back to the original parent,
2759 * which has a filp for sure, which we use as the reference
2760 * count:
2761 */
2762 if (parent_counter->parent)
2763 parent_counter = parent_counter->parent;
2764
Ingo Molnar9b51f662008-12-12 13:49:45 +01002765 child_counter = perf_counter_alloc(&parent_counter->hw_event,
Paul Mackerras23a185c2009-02-09 22:42:47 +11002766 parent_counter->cpu, child_ctx,
2767 group_leader, GFP_KERNEL);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002768 if (IS_ERR(child_counter))
2769 return child_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002770
2771 /*
2772 * Link it up in the child's context:
2773 */
Ingo Molnar9b51f662008-12-12 13:49:45 +01002774 child_counter->task = child;
Paul Mackerras53cfbf52009-03-25 22:46:58 +11002775 add_counter_to_ctx(child_counter, child_ctx);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002776
2777 child_counter->parent = parent_counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002778 /*
2779 * inherit into child's child as well:
2780 */
2781 child_counter->hw_event.inherit = 1;
2782
2783 /*
2784 * Get a reference to the parent filp - we will fput it
2785 * when the child counter exits. This is safe to do because
2786 * we are in the parent and we know that the filp still
2787 * exists and has a nonzero count:
2788 */
2789 atomic_long_inc(&parent_counter->filp->f_count);
2790
Paul Mackerrasd859e292009-01-17 18:10:22 +11002791 /*
2792 * Link this into the parent counter's child list
2793 */
2794 mutex_lock(&parent_counter->mutex);
2795 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2796
2797 /*
2798 * Make the child state follow the state of the parent counter,
2799 * not its hw_event.disabled bit. We hold the parent's mutex,
2800 * so we won't race with perf_counter_{en,dis}able_family.
2801 */
2802 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2803 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2804 else
2805 child_counter->state = PERF_COUNTER_STATE_OFF;
2806
2807 mutex_unlock(&parent_counter->mutex);
2808
2809 return child_counter;
2810}
2811
2812static int inherit_group(struct perf_counter *parent_counter,
2813 struct task_struct *parent,
2814 struct perf_counter_context *parent_ctx,
2815 struct task_struct *child,
2816 struct perf_counter_context *child_ctx)
2817{
2818 struct perf_counter *leader;
2819 struct perf_counter *sub;
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002820 struct perf_counter *child_ctr;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002821
2822 leader = inherit_counter(parent_counter, parent, parent_ctx,
2823 child, NULL, child_ctx);
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002824 if (IS_ERR(leader))
2825 return PTR_ERR(leader);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002826 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
Paul Mackerrasd5d2bc0d2009-03-30 19:07:08 +02002827 child_ctr = inherit_counter(sub, parent, parent_ctx,
2828 child, leader, child_ctx);
2829 if (IS_ERR(child_ctr))
2830 return PTR_ERR(child_ctr);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002831 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01002832 return 0;
2833}
2834
Paul Mackerrasd859e292009-01-17 18:10:22 +11002835static void sync_child_counter(struct perf_counter *child_counter,
2836 struct perf_counter *parent_counter)
2837{
2838 u64 parent_val, child_val;
2839
2840 parent_val = atomic64_read(&parent_counter->count);
2841 child_val = atomic64_read(&child_counter->count);
2842
2843 /*
2844 * Add back the child's count to the parent's count:
2845 */
2846 atomic64_add(child_val, &parent_counter->count);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11002847 atomic64_add(child_counter->total_time_enabled,
2848 &parent_counter->child_total_time_enabled);
2849 atomic64_add(child_counter->total_time_running,
2850 &parent_counter->child_total_time_running);
Paul Mackerrasd859e292009-01-17 18:10:22 +11002851
2852 /*
2853 * Remove this counter from the parent's list
2854 */
2855 mutex_lock(&parent_counter->mutex);
2856 list_del_init(&child_counter->child_list);
2857 mutex_unlock(&parent_counter->mutex);
2858
2859 /*
2860 * Release the parent counter, if this was the last
2861 * reference to it.
2862 */
2863 fput(parent_counter->filp);
2864}
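
/*
 * Worked example (numbers are illustrative): if the parent counter reads
 * 10000 and the exiting child's inherited counter reads 2500,
 * sync_child_counter() leaves the parent's count at 12500 and adds the
 * child's total_time_enabled/running to the parent's child_total_time_*
 * fields, so a later read on the parent's fd includes the child's
 * activity.  The fput() drops the reference taken in inherit_counter()
 * and may free the parent counter if it was the last one.
 */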
2865
Ingo Molnar9b51f662008-12-12 13:49:45 +01002866static void
2867__perf_counter_exit_task(struct task_struct *child,
2868 struct perf_counter *child_counter,
2869 struct perf_counter_context *child_ctx)
2870{
2871 struct perf_counter *parent_counter;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002872 struct perf_counter *sub, *tmp;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002873
2874 /*
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002875 * If we do not self-reap then we have to wait for the
2876	 * child task to unschedule (which is guaranteed to happen),
2877 * so that its counter is at its final count. (This
2878 * condition triggers rarely - child tasks usually get
2879 * off their CPU before the parent has a chance to
2880 * get this far into the reaping action)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002881 */
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002882 if (child != current) {
2883 wait_task_inactive(child, 0);
2884 list_del_init(&child_counter->list_entry);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11002885 update_counter_times(child_counter);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002886 } else {
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002887 struct perf_cpu_context *cpuctx;
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002888 unsigned long flags;
2889 u64 perf_flags;
2890
2891 /*
2892 * Disable and unlink this counter.
2893 *
2894 * Be careful about zapping the list - IRQ/NMI context
2895 * could still be processing it:
2896 */
Peter Zijlstra849691a2009-04-06 11:45:12 +02002897 local_irq_save(flags);
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002898 perf_flags = hw_perf_save_disable();
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002899
2900 cpuctx = &__get_cpu_var(perf_cpu_context);
2901
Paul Mackerrasd859e292009-01-17 18:10:22 +11002902 group_sched_out(child_counter, cpuctx, child_ctx);
Paul Mackerras53cfbf52009-03-25 22:46:58 +11002903 update_counter_times(child_counter);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002904
Ingo Molnar235c7fc2008-12-21 14:43:25 +01002905 list_del_init(&child_counter->list_entry);
2906
2907 child_ctx->nr_counters--;
2908
2909 hw_perf_restore(perf_flags);
Peter Zijlstra849691a2009-04-06 11:45:12 +02002910 local_irq_restore(flags);
Ingo Molnar0cc0c022008-12-14 23:20:36 +01002911 }
2912
Ingo Molnar9b51f662008-12-12 13:49:45 +01002913 parent_counter = child_counter->parent;
2914 /*
2915	 * It can happen that the parent exits first, and has counters
2916	 * that are still around due to the child's reference. These
2917	 * counters need to be zapped; otherwise they linger.
2918 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002919 if (parent_counter) {
2920 sync_child_counter(child_counter, parent_counter);
2921 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2922 list_entry) {
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002923 if (sub->parent) {
Paul Mackerrasd859e292009-01-17 18:10:22 +11002924 sync_child_counter(sub, sub->parent);
Peter Zijlstraf1600952009-03-19 20:26:16 +01002925 free_counter(sub);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002926 }
Paul Mackerrasd859e292009-01-17 18:10:22 +11002927 }
Peter Zijlstraf1600952009-03-19 20:26:16 +01002928 free_counter(child_counter);
Paul Mackerras4bcf3492009-02-11 13:53:19 +01002929 }
Ingo Molnar9b51f662008-12-12 13:49:45 +01002930}
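
/*
 * Summary of the two paths above: when another task reaps the child, we
 * only need to wait for the child to be scheduled out before unlinking
 * its counter; when the child reaps itself, the counter may still be
 * active, so it is scheduled out under hw_perf_save_disable() with IRQs
 * off before being unlinked.  Either way, counts are then folded back
 * into the parent counters (if any) and the child counters are freed.
 */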
2931
2932/*
Paul Mackerrasd859e292009-01-17 18:10:22 +11002933 * When a child task exits, feed back counter values to parent counters.
Ingo Molnar9b51f662008-12-12 13:49:45 +01002934 *
Paul Mackerrasd859e292009-01-17 18:10:22 +11002935 * Note: we may be running in child context, but the PID is not hashed
Ingo Molnar9b51f662008-12-12 13:49:45 +01002936 * anymore so new counters will not be added.
2937 */
2938void perf_counter_exit_task(struct task_struct *child)
2939{
2940 struct perf_counter *child_counter, *tmp;
2941 struct perf_counter_context *child_ctx;
2942
2943 child_ctx = &child->perf_counter_ctx;
2944
2945 if (likely(!child_ctx->nr_counters))
2946 return;
2947
2948 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2949 list_entry)
2950 __perf_counter_exit_task(child, child_counter, child_ctx);
2951}
2952
2953/*
2954 * Initialize the perf_counter context in task_struct
2955 */
2956void perf_counter_init_task(struct task_struct *child)
2957{
2958 struct perf_counter_context *child_ctx, *parent_ctx;
Paul Mackerrasd859e292009-01-17 18:10:22 +11002959 struct perf_counter *counter;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002960 struct task_struct *parent = current;
Ingo Molnar9b51f662008-12-12 13:49:45 +01002961
2962 child_ctx = &child->perf_counter_ctx;
2963 parent_ctx = &parent->perf_counter_ctx;
2964
2965 __perf_counter_init_context(child_ctx, child);
2966
2967 /*
2968 * This is executed from the parent task context, so inherit
2969 * counters that have been marked for cloning:
2970 */
2971
2972 if (likely(!parent_ctx->nr_counters))
2973 return;
2974
2975 /*
2976 * Lock the parent list. No need to lock the child - not PID
2977 * hashed yet and not running, so nobody can access it.
2978 */
Paul Mackerrasd859e292009-01-17 18:10:22 +11002979 mutex_lock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002980
2981 /*
2982	 * We don't have to disable NMIs - we are only looking at
2983 * the list, not manipulating it:
2984 */
2985 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
Paul Mackerrasd859e292009-01-17 18:10:22 +11002986 if (!counter->hw_event.inherit)
Ingo Molnar9b51f662008-12-12 13:49:45 +01002987 continue;
2988
Paul Mackerrasd859e292009-01-17 18:10:22 +11002989 if (inherit_group(counter, parent,
Ingo Molnar9b51f662008-12-12 13:49:45 +01002990 parent_ctx, child, child_ctx))
2991 break;
2992 }
2993
Paul Mackerrasd859e292009-01-17 18:10:22 +11002994 mutex_unlock(&parent_ctx->mutex);
Ingo Molnar9b51f662008-12-12 13:49:45 +01002995}
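
/*
 * Note: this runs in the context of the forking (parent) task, before
 * the child is visible to anyone else, which is why only the parent
 * context needs locking.  Only counters created with hw_event.inherit
 * set are cloned; everything else stays private to the parent.
 */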
2996
Ingo Molnar04289bb2008-12-11 08:38:42 +01002997static void __cpuinit perf_counter_init_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01002998{
Ingo Molnar04289bb2008-12-11 08:38:42 +01002999 struct perf_cpu_context *cpuctx;
Thomas Gleixner0793a612008-12-04 20:12:29 +01003000
Ingo Molnar04289bb2008-12-11 08:38:42 +01003001 cpuctx = &per_cpu(perf_cpu_context, cpu);
3002 __perf_counter_init_context(&cpuctx->ctx, NULL);
Thomas Gleixner0793a612008-12-04 20:12:29 +01003003
3004 mutex_lock(&perf_resource_mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01003005 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
Thomas Gleixner0793a612008-12-04 20:12:29 +01003006 mutex_unlock(&perf_resource_mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01003007
Paul Mackerras01d02872009-01-14 13:44:19 +11003008 hw_perf_counter_setup(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01003009}
3010
3011#ifdef CONFIG_HOTPLUG_CPU
Ingo Molnar04289bb2008-12-11 08:38:42 +01003012static void __perf_counter_exit_cpu(void *info)
Thomas Gleixner0793a612008-12-04 20:12:29 +01003013{
3014 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3015 struct perf_counter_context *ctx = &cpuctx->ctx;
3016 struct perf_counter *counter, *tmp;
3017
Ingo Molnar04289bb2008-12-11 08:38:42 +01003018 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
3019 __perf_counter_remove_from_context(counter);
Thomas Gleixner0793a612008-12-04 20:12:29 +01003020}
Ingo Molnar04289bb2008-12-11 08:38:42 +01003021static void perf_counter_exit_cpu(int cpu)
Thomas Gleixner0793a612008-12-04 20:12:29 +01003022{
Paul Mackerrasd859e292009-01-17 18:10:22 +11003023 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3024 struct perf_counter_context *ctx = &cpuctx->ctx;
3025
3026 mutex_lock(&ctx->mutex);
Ingo Molnar04289bb2008-12-11 08:38:42 +01003027 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
Paul Mackerrasd859e292009-01-17 18:10:22 +11003028 mutex_unlock(&ctx->mutex);
Thomas Gleixner0793a612008-12-04 20:12:29 +01003029}
3030#else
Ingo Molnar04289bb2008-12-11 08:38:42 +01003031static inline void perf_counter_exit_cpu(int cpu) { }
Thomas Gleixner0793a612008-12-04 20:12:29 +01003032#endif
3033
3034static int __cpuinit
3035perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
3036{
3037 unsigned int cpu = (long)hcpu;
3038
3039 switch (action) {
3040
3041 case CPU_UP_PREPARE:
3042 case CPU_UP_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01003043 perf_counter_init_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01003044 break;
3045
3046 case CPU_DOWN_PREPARE:
3047 case CPU_DOWN_PREPARE_FROZEN:
Ingo Molnar04289bb2008-12-11 08:38:42 +01003048 perf_counter_exit_cpu(cpu);
Thomas Gleixner0793a612008-12-04 20:12:29 +01003049 break;
3050
3051 default:
3052 break;
3053 }
3054
3055 return NOTIFY_OK;
3056}
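
/*
 * Hotplug flow in brief: CPU_UP_PREPARE initializes the new CPU's
 * perf_cpu_context and recomputes its max_pertask under
 * perf_resource_mutex; CPU_DOWN_PREPARE calls into the outgoing CPU so
 * that every counter in its per-CPU context is removed before the CPU
 * goes offline.
 */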
3057
3058static struct notifier_block __cpuinitdata perf_cpu_nb = {
3059 .notifier_call = perf_cpu_notify,
3060};
3061
3062static int __init perf_counter_init(void)
3063{
3064 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3065 (void *)(long)smp_processor_id());
3066 register_cpu_notifier(&perf_cpu_nb);
3067
3068 return 0;
3069}
3070early_initcall(perf_counter_init);
3071
3072static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
3073{
3074 return sprintf(buf, "%d\n", perf_reserved_percpu);
3075}
3076
3077static ssize_t
3078perf_set_reserve_percpu(struct sysdev_class *class,
3079 const char *buf,
3080 size_t count)
3081{
3082 struct perf_cpu_context *cpuctx;
3083 unsigned long val;
3084 int err, cpu, mpt;
3085
3086 err = strict_strtoul(buf, 10, &val);
3087 if (err)
3088 return err;
3089 if (val > perf_max_counters)
3090 return -EINVAL;
3091
3092 mutex_lock(&perf_resource_mutex);
3093 perf_reserved_percpu = val;
3094 for_each_online_cpu(cpu) {
3095 cpuctx = &per_cpu(perf_cpu_context, cpu);
3096 spin_lock_irq(&cpuctx->ctx.lock);
3097 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
3098 perf_max_counters - perf_reserved_percpu);
3099 cpuctx->max_pertask = mpt;
3100 spin_unlock_irq(&cpuctx->ctx.lock);
3101 }
3102 mutex_unlock(&perf_resource_mutex);
3103
3104 return count;
3105}
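
/*
 * Example usage (the sysfs directory is assumed from cpu_sysdev_class,
 * typically /sys/devices/system/cpu/perf_counters/):
 *
 *	# cat /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	0
 *	# echo 2 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *
 * Writing 2 reserves two counters per CPU for per-CPU use; every online
 * CPU's max_pertask is recomputed immediately, and values larger than
 * perf_max_counters are rejected with -EINVAL.
 */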
3106
3107static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
3108{
3109 return sprintf(buf, "%d\n", perf_overcommit);
3110}
3111
3112static ssize_t
3113perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3114{
3115 unsigned long val;
3116 int err;
3117
3118 err = strict_strtoul(buf, 10, &val);
3119 if (err)
3120 return err;
3121 if (val > 1)
3122 return -EINVAL;
3123
3124 mutex_lock(&perf_resource_mutex);
3125 perf_overcommit = val;
3126 mutex_unlock(&perf_resource_mutex);
3127
3128 return count;
3129}
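
/*
 * Example usage (same assumed sysfs directory as above):
 *
 *	# echo 0 > /sys/devices/system/cpu/perf_counters/overcommit
 *
 * Only 0 and 1 are accepted; the value is latched under
 * perf_resource_mutex, and judging by its name and default of 1 it
 * controls whether per-task counters may overcommit the hardware beyond
 * the per-CPU reservation.  The policy itself is consumed elsewhere in
 * this file.
 */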
3130
3131static SYSDEV_CLASS_ATTR(
3132 reserve_percpu,
3133 0644,
3134 perf_show_reserve_percpu,
3135 perf_set_reserve_percpu
3136 );
3137
3138static SYSDEV_CLASS_ATTR(
3139 overcommit,
3140 0644,
3141 perf_show_overcommit,
3142 perf_set_overcommit
3143 );
3144
3145static struct attribute *perfclass_attrs[] = {
3146 &attr_reserve_percpu.attr,
3147 &attr_overcommit.attr,
3148 NULL
3149};
3150
3151static struct attribute_group perfclass_attr_group = {
3152 .attrs = perfclass_attrs,
3153 .name = "perf_counters",
3154};
3155
3156static int __init perf_counter_sysfs_init(void)
3157{
3158 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
3159 &perfclass_attr_group);
3160}
3161device_initcall(perf_counter_sysfs_init);