/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/dcache.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
}

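/*
 * Remove a counter from the context's lists and, if it led a group,
 * promote its siblings to standalone counters.
 */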
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

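/*
 * Take an active counter off the PMU: mark it inactive, record its
 * stop time and update the active/exclusive bookkeeping.
 */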
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

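/*
 * Schedule out a whole counter group: the leader first, then all of
 * its siblings.
 */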
static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock_irqrestore(&ctx->lock, flags);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Disable a counter.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

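/*
 * Put a single counter on the PMU; returns -EAGAIN when the hardware
 * refuses it, leaving the counter inactive.
 */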
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

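/*
 * Link a counter into its context and initialize its timestamps to
 * the context's current time.
 */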
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	ctx->nr_counters++;
	counter->prev_state = PERF_COUNTER_STATE_OFF;
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	hw_perf_restore(perf_flags);

	spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);
	update_context_time(ctx);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx,
				       smp_processor_id());

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

 unlock:
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Enable a counter.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
 out:
	spin_unlock_irq(&ctx->lock);
}

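/*
 * Enable a counter and arm it for 'refresh' more event-limit wakeups.
 */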
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->hw_event.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

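/*
 * Mark a context inactive and schedule out all of its active
 * counter groups.
 */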
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}

static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

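/*
 * Schedule in a counter group as a single unit: if any member fails
 * to go on, the partial group is undone and -EAGAIN returned.
 */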
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

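/*
 * Schedule in a context's counters: pinned groups go first (and are
 * put into error state if they cannot be scheduled), then the rest,
 * subject to each counter's 'cpu' filter.
 */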
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	flags = hw_perf_save_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, 1))
			group_sched_in(counter, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		}
	}
	hw_perf_restore(flags);
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

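/*
 * Switch out the current task's counters and put all of them (except
 * those already in error state) into OFF state.
 */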
int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	local_irq_save(flags);
	cpu = smp_processor_id();

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ERROR) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_OFF;
		}
	}

	hw_perf_restore(perf_flags);

	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}

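/*
 * The counterpart of perf_counter_task_disable(): move every OFF
 * counter back to INACTIVE and reschedule the task's context.
 */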
int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	local_irq_save(flags);
	cpu = smp_processor_id();

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state > PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	local_irq_restore(flags);

	return 0;
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}

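/*
 * Called from the scheduler tick: rotate both the CPU context and the
 * current task's context so that all counter groups get their share
 * of PMU time.
 */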
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = &curr->perf_counter_ctx;

	perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

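/*
 * Read the current value of a counter, kicking the CPU it is active
 * on (if any) so that counter->count is up to date.
 */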
static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}

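/*
 * Look up the context a new counter should be attached to: the per-CPU
 * context when cpu != -1, otherwise the context of the task identified
 * by pid (0 meaning the current task).
 */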
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

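/*
 * Flush pending work, drop the global tracking counts and free the
 * counter once an RCU grace period has passed.
 */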
static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	atomic_dec(&nr_counters);
	if (counter->hw_event.mmap)
		atomic_dec(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_dec(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_dec(&nr_comm_tracking);

	if (counter->destroy)
		counter->destroy(counter);

	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&ctx->mutex);
	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);

	mutex_unlock(&counter->mutex);
	mutex_unlock(&ctx->mutex);

	free_counter(counter);
	put_context(ctx);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[3];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	mutex_lock(&counter->mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	mutex_unlock(&counter->mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLLHUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

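/*
 * Fold the latest hardware delta into counter->count, zero it and
 * propagate the new value to the user-space mapping.
 */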
static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}

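/*
 * Apply a function to every member of a counter's group; the helpers
 * below extend this over all inherited child counters as well.
 */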
static void perf_counter_for_each_sibling(struct perf_counter *counter,
					  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	spin_lock_irq(&ctx->lock);
	counter = counter->group_leader;

	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		func(sibling);
	spin_unlock_irq(&ctx->lock);
}

static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	mutex_lock(&counter->mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	mutex_lock(&counter->mutex);
	perf_counter_for_each_sibling(counter, func);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_for_each_sibling(child, func);
	mutex_unlock(&counter->mutex);
}

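/*
 * ioctl() dispatch: enable, disable or reset a counter (or, when
 * PERF_IOC_FLAG_GROUP is passed in the argument, its whole group),
 * or arm it for 'arg' more event-limit wakeups, e.g.:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */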
Paul Mackerrasd859e292009-01-17 18:10:22 +11001318static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1319{
1320 struct perf_counter *counter = file->private_data;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001321 void (*func)(struct perf_counter *);
1322 u32 flags = arg;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001323
1324 switch (cmd) {
1325 case PERF_COUNTER_IOC_ENABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001326 func = perf_counter_enable;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001327 break;
1328 case PERF_COUNTER_IOC_DISABLE:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001329 func = perf_counter_disable;
Peter Zijlstra79f14642009-04-06 11:45:07 +02001330 break;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001331 case PERF_COUNTER_IOC_RESET:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001332 func = perf_counter_reset;
Peter Zijlstra6de6a7b2009-05-05 17:50:23 +02001333 break;
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001334
1335 case PERF_COUNTER_IOC_REFRESH:
1336 return perf_counter_refresh(counter, arg);
Paul Mackerrasd859e292009-01-17 18:10:22 +11001337 default:
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001338 return -ENOTTY;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001339 }
Peter Zijlstra3df5eda2009-05-08 18:52:22 +02001340
1341 if (flags & PERF_IOC_FLAG_GROUP)
1342 perf_counter_for_each(counter, func);
1343 else
1344 perf_counter_for_each_child(counter, func);
1345
1346 return 0;
Paul Mackerrasd859e292009-01-17 18:10:22 +11001347}
1348
Peter Zijlstra38ff6672009-03-30 19:07:03 +02001349/*
1350 * Callers need to ensure there can be no nesting of this function, otherwise
1351 * the seqlock logic goes bad. We can not serialize this because the arch
1352 * code calls this from NMI context.
1353 */
1354void perf_counter_update_userpage(struct perf_counter *counter)
Paul Mackerras37d81822009-03-23 18:22:08 +01001355{
Peter Zijlstra38ff6672009-03-30 19:07:03 +02001356 struct perf_mmap_data *data;
1357 struct perf_counter_mmap_page *userpg;
1358
1359 rcu_read_lock();
1360 data = rcu_dereference(counter->data);
1361 if (!data)
1362 goto unlock;
1363
1364 userpg = data->user_page;
Paul Mackerras37d81822009-03-23 18:22:08 +01001365
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001366 /*
1367 * Disable preemption so as to not let the corresponding user-space
1368 * spin too long if we get preempted.
1369 */
1370 preempt_disable();
Paul Mackerras37d81822009-03-23 18:22:08 +01001371 ++userpg->lock;
Peter Zijlstra92f22a32009-04-02 11:12:04 +02001372 barrier();
Paul Mackerras37d81822009-03-23 18:22:08 +01001373 userpg->index = counter->hw.idx;
1374 userpg->offset = atomic64_read(&counter->count);
1375 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1376 userpg->offset -= atomic64_read(&counter->hw.prev_count);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001377
Peter Zijlstra92f22a32009-04-02 11:12:04 +02001378 barrier();
Paul Mackerras37d81822009-03-23 18:22:08 +01001379 ++userpg->lock;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001380 preempt_enable();
Peter Zijlstra38ff6672009-03-30 19:07:03 +02001381unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001382 rcu_read_unlock();
Paul Mackerras37d81822009-03-23 18:22:08 +01001383}
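/*
 * The matching user-space read side is a seqlock-style retry loop; a
 * minimal sketch, assuming 'pc' points at the mmap()ed control page:
 *
 *	u32 seq;
 *	s64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		count = pc->offset;	(plus a hw read via pc->index if active)
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * An odd ->lock value means an update was in flight; retry until the
 * two reads of ->lock agree.
 */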
1384
1385static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1386{
1387 struct perf_counter *counter = vma->vm_file->private_data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001388 struct perf_mmap_data *data;
1389 int ret = VM_FAULT_SIGBUS;
Paul Mackerras37d81822009-03-23 18:22:08 +01001390
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001391 rcu_read_lock();
1392 data = rcu_dereference(counter->data);
1393 if (!data)
1394 goto unlock;
Paul Mackerras37d81822009-03-23 18:22:08 +01001395
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001396 if (vmf->pgoff == 0) {
1397 vmf->page = virt_to_page(data->user_page);
1398 } else {
1399 int nr = vmf->pgoff - 1;
1400
 1401		if ((unsigned)nr >= data->nr_pages)
1402 goto unlock;
1403
1404 vmf->page = virt_to_page(data->data_pages[nr]);
1405 }
Paul Mackerras37d81822009-03-23 18:22:08 +01001406 get_page(vmf->page);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001407 ret = 0;
1408unlock:
1409 rcu_read_unlock();
1410
1411 return ret;
1412}
1413
1414static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1415{
1416 struct perf_mmap_data *data;
1417 unsigned long size;
1418 int i;
1419
1420 WARN_ON(atomic_read(&counter->mmap_count));
1421
1422 size = sizeof(struct perf_mmap_data);
1423 size += nr_pages * sizeof(void *);
1424
1425 data = kzalloc(size, GFP_KERNEL);
1426 if (!data)
1427 goto fail;
1428
1429 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1430 if (!data->user_page)
1431 goto fail_user_page;
1432
1433 for (i = 0; i < nr_pages; i++) {
1434 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1435 if (!data->data_pages[i])
1436 goto fail_data_pages;
1437 }
1438
1439 data->nr_pages = nr_pages;
Peter Zijlstra22c15582009-05-05 17:50:25 +02001440 atomic_set(&data->lock, -1);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001441
1442 rcu_assign_pointer(counter->data, data);
1443
Paul Mackerras37d81822009-03-23 18:22:08 +01001444 return 0;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001445
1446fail_data_pages:
1447 for (i--; i >= 0; i--)
1448 free_page((unsigned long)data->data_pages[i]);
1449
1450 free_page((unsigned long)data->user_page);
1451
1452fail_user_page:
1453 kfree(data);
1454
1455fail:
1456 return -ENOMEM;
1457}
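/*
 * Resulting layout, as perf_mmap_fault() above resolves it: pgoff 0 is
 * the control page (struct perf_counter_mmap_page), pgoff 1..nr_pages
 * are the data pages.
 */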
1458
1459static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1460{
1461 struct perf_mmap_data *data = container_of(rcu_head,
1462 struct perf_mmap_data, rcu_head);
1463 int i;
1464
1465 free_page((unsigned long)data->user_page);
1466 for (i = 0; i < data->nr_pages; i++)
1467 free_page((unsigned long)data->data_pages[i]);
1468 kfree(data);
1469}
1470
1471static void perf_mmap_data_free(struct perf_counter *counter)
1472{
1473 struct perf_mmap_data *data = counter->data;
1474
1475 WARN_ON(atomic_read(&counter->mmap_count));
1476
1477 rcu_assign_pointer(counter->data, NULL);
1478 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1479}
1480
1481static void perf_mmap_open(struct vm_area_struct *vma)
1482{
1483 struct perf_counter *counter = vma->vm_file->private_data;
1484
1485 atomic_inc(&counter->mmap_count);
1486}
1487
1488static void perf_mmap_close(struct vm_area_struct *vma)
1489{
1490 struct perf_counter *counter = vma->vm_file->private_data;
1491
1492 if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1493 &counter->mmap_mutex)) {
Peter Zijlstrac5078f72009-05-05 17:50:24 +02001494 vma->vm_mm->locked_vm -= counter->data->nr_locked;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001495 perf_mmap_data_free(counter);
1496 mutex_unlock(&counter->mmap_mutex);
1497 }
Paul Mackerras37d81822009-03-23 18:22:08 +01001498}
1499
1500static struct vm_operations_struct perf_mmap_vmops = {
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001501 .open = perf_mmap_open,
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001502 .close = perf_mmap_close,
Paul Mackerras37d81822009-03-23 18:22:08 +01001503 .fault = perf_mmap_fault,
1504};
1505
1506static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1507{
1508 struct perf_counter *counter = file->private_data;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001509 unsigned long vma_size;
1510 unsigned long nr_pages;
1511 unsigned long locked, lock_limit;
1512 int ret = 0;
Peter Zijlstrac5078f72009-05-05 17:50:24 +02001513 long extra;
Paul Mackerras37d81822009-03-23 18:22:08 +01001514
1515 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1516 return -EINVAL;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001517
1518 vma_size = vma->vm_end - vma->vm_start;
1519 nr_pages = (vma_size / PAGE_SIZE) - 1;
1520
Peter Zijlstra7730d862009-03-25 12:48:31 +01001521 /*
 1522 * If we have data pages, ensure they're a power-of-two number, so we
1523 * can do bitmasks instead of modulo.
1524 */
1525 if (nr_pages != 0 && !is_power_of_2(nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01001526 return -EINVAL;
1527
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001528 if (vma_size != PAGE_SIZE * (1 + nr_pages))
Paul Mackerras37d81822009-03-23 18:22:08 +01001529 return -EINVAL;
1530
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001531 if (vma->vm_pgoff != 0)
1532 return -EINVAL;
Paul Mackerras37d81822009-03-23 18:22:08 +01001533
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001534 mutex_lock(&counter->mmap_mutex);
1535 if (atomic_inc_not_zero(&counter->mmap_count)) {
1536 if (nr_pages != counter->data->nr_pages)
1537 ret = -EINVAL;
1538 goto unlock;
1539 }
1540
Peter Zijlstrac5078f72009-05-05 17:50:24 +02001541	extra = nr_pages; /* no "+ 1": only the data pages are accounted */
1542 extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1543 if (extra < 0)
1544 extra = 0;
1545
1546 locked = vma->vm_mm->locked_vm + extra;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001547
1548 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1549 lock_limit >>= PAGE_SHIFT;
1550
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001551 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1552 ret = -EPERM;
1553 goto unlock;
1554 }
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001555
1556 WARN_ON(counter->data);
1557 ret = perf_mmap_data_alloc(counter, nr_pages);
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001558 if (ret)
1559 goto unlock;
1560
1561 atomic_set(&counter->mmap_count, 1);
Peter Zijlstrac5078f72009-05-05 17:50:24 +02001562 vma->vm_mm->locked_vm += extra;
1563 counter->data->nr_locked = extra;
Peter Zijlstraebb3c4c2009-04-06 11:45:05 +02001564unlock:
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001565 mutex_unlock(&counter->mmap_mutex);
Paul Mackerras37d81822009-03-23 18:22:08 +01001566
1567 vma->vm_flags &= ~VM_MAYWRITE;
1568 vma->vm_flags |= VM_RESERVED;
1569 vma->vm_ops = &perf_mmap_vmops;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001570
1571 return ret;
Paul Mackerras37d81822009-03-23 18:22:08 +01001572}
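/*
 * Illustrative user-space mapping (a sketch): one control page plus a
 * power-of-two number of data pages, mapped shared and read-only:
 *
 *	len = (1 + 8) * page_size;
 *	base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Writable or private mappings are rejected above, as is any length
 * that does not decompose into 1 + 2^n pages.
 */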
1573
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02001574static int perf_fasync(int fd, struct file *filp, int on)
1575{
1576 struct perf_counter *counter = filp->private_data;
1577 struct inode *inode = filp->f_path.dentry->d_inode;
1578 int retval;
1579
1580 mutex_lock(&inode->i_mutex);
1581 retval = fasync_helper(fd, filp, on, &counter->fasync);
1582 mutex_unlock(&inode->i_mutex);
1583
1584 if (retval < 0)
1585 return retval;
1586
1587 return 0;
1588}
1589
Thomas Gleixner0793a612008-12-04 20:12:29 +01001590static const struct file_operations perf_fops = {
1591 .release = perf_release,
1592 .read = perf_read,
1593 .poll = perf_poll,
Paul Mackerrasd859e292009-01-17 18:10:22 +11001594 .unlocked_ioctl = perf_ioctl,
1595 .compat_ioctl = perf_ioctl,
Paul Mackerras37d81822009-03-23 18:22:08 +01001596 .mmap = perf_mmap,
Peter Zijlstra3c446b3d2009-04-06 11:45:01 +02001597 .fasync = perf_fasync,
Thomas Gleixner0793a612008-12-04 20:12:29 +01001598};
1599
Peter Zijlstra15dbf272009-03-13 12:21:32 +01001600/*
Peter Zijlstra925d5192009-03-30 19:07:02 +02001601 * Perf counter wakeup
1602 *
1603 * If there's data, ensure we set the poll() state and publish everything
1604 * to user-space before waking everybody up.
1605 */
1606
1607void perf_counter_wakeup(struct perf_counter *counter)
1608{
Peter Zijlstra925d5192009-03-30 19:07:02 +02001609 wake_up_all(&counter->waitq);
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001610
1611 if (counter->pending_kill) {
1612 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1613 counter->pending_kill = 0;
1614 }
Peter Zijlstra925d5192009-03-30 19:07:02 +02001615}
1616
1617/*
1618 * Pending wakeups
1619 *
 1620 * Handle the case where we need to wake up from NMI (or rq->lock) context.
1621 *
1622 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1623 * single linked list and use cmpxchg() to add entries lockless.
1624 */
1625
Peter Zijlstra79f14642009-04-06 11:45:07 +02001626static void perf_pending_counter(struct perf_pending_entry *entry)
1627{
1628 struct perf_counter *counter = container_of(entry,
1629 struct perf_counter, pending);
1630
1631 if (counter->pending_disable) {
1632 counter->pending_disable = 0;
1633 perf_counter_disable(counter);
1634 }
1635
1636 if (counter->pending_wakeup) {
1637 counter->pending_wakeup = 0;
1638 perf_counter_wakeup(counter);
1639 }
1640}
1641
Peter Zijlstra671dec52009-04-06 11:45:02 +02001642#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02001643
Peter Zijlstra671dec52009-04-06 11:45:02 +02001644static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
Peter Zijlstra925d5192009-03-30 19:07:02 +02001645 PENDING_TAIL,
1646};
1647
Peter Zijlstra671dec52009-04-06 11:45:02 +02001648static void perf_pending_queue(struct perf_pending_entry *entry,
1649 void (*func)(struct perf_pending_entry *))
Peter Zijlstra925d5192009-03-30 19:07:02 +02001650{
Peter Zijlstra671dec52009-04-06 11:45:02 +02001651 struct perf_pending_entry **head;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001652
Peter Zijlstra671dec52009-04-06 11:45:02 +02001653 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
Peter Zijlstra925d5192009-03-30 19:07:02 +02001654 return;
1655
Peter Zijlstra671dec52009-04-06 11:45:02 +02001656 entry->func = func;
1657
1658 head = &get_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001659
1660 do {
Peter Zijlstra671dec52009-04-06 11:45:02 +02001661 entry->next = *head;
1662 } while (cmpxchg(head, entry->next, entry) != entry->next);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001663
1664 set_perf_counter_pending();
1665
Peter Zijlstra671dec52009-04-06 11:45:02 +02001666 put_cpu_var(perf_pending_head);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001667}
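/*
 * Publication order, by example: with head -> A -> TAIL, queueing B
 * first sets B->next = A, then cmpxchg()s head from A to B, giving
 * head -> B -> A -> TAIL. If an NMI queues in between, the cmpxchg()
 * fails and the loop retries; the initial cmpxchg(&entry->next, NULL,
 * PENDING_TAIL) keeps an already-queued entry from being queued twice.
 */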
1668
1669static int __perf_pending_run(void)
1670{
Peter Zijlstra671dec52009-04-06 11:45:02 +02001671 struct perf_pending_entry *list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001672 int nr = 0;
1673
Peter Zijlstra671dec52009-04-06 11:45:02 +02001674 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001675 while (list != PENDING_TAIL) {
Peter Zijlstra671dec52009-04-06 11:45:02 +02001676 void (*func)(struct perf_pending_entry *);
1677 struct perf_pending_entry *entry = list;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001678
1679 list = list->next;
1680
Peter Zijlstra671dec52009-04-06 11:45:02 +02001681 func = entry->func;
1682 entry->next = NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001683 /*
1684 * Ensure we observe the unqueue before we issue the wakeup,
1685 * so that we won't be waiting forever.
1686 * -- see perf_not_pending().
1687 */
1688 smp_wmb();
1689
Peter Zijlstra671dec52009-04-06 11:45:02 +02001690 func(entry);
Peter Zijlstra925d5192009-03-30 19:07:02 +02001691 nr++;
1692 }
1693
1694 return nr;
1695}
1696
1697static inline int perf_not_pending(struct perf_counter *counter)
1698{
1699 /*
 1700	 * If we flush on whatever cpu we run on, there is a chance we don't
1701 * need to wait.
1702 */
1703 get_cpu();
1704 __perf_pending_run();
1705 put_cpu();
1706
1707 /*
1708 * Ensure we see the proper queue state before going to sleep
 1709	 * so that we do not miss the wakeup. -- see __perf_pending_run()
1710 */
1711 smp_rmb();
Peter Zijlstra671dec52009-04-06 11:45:02 +02001712 return counter->pending.next == NULL;
Peter Zijlstra925d5192009-03-30 19:07:02 +02001713}
1714
1715static void perf_pending_sync(struct perf_counter *counter)
1716{
1717 wait_event(counter->waitq, perf_not_pending(counter));
1718}
1719
1720void perf_counter_do_pending(void)
1721{
1722 __perf_pending_run();
1723}
1724
1725/*
Peter Zijlstra394ee072009-03-30 19:07:14 +02001726 * Callchain support -- arch specific
1727 */
1728
Peter Zijlstra9c03d882009-04-06 11:45:00 +02001729__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
Peter Zijlstra394ee072009-03-30 19:07:14 +02001730{
1731 return NULL;
1732}
1733
1734/*
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001735 * Output
1736 */
1737
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001738struct perf_output_handle {
1739 struct perf_counter *counter;
1740 struct perf_mmap_data *data;
1741 unsigned int offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01001742 unsigned int head;
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001743 int nmi;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001744 int overflow;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001745 int locked;
1746 unsigned long flags;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001747};
1748
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001749static void perf_output_wakeup(struct perf_output_handle *handle)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001750{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001751 atomic_set(&handle->data->poll, POLL_IN);
1752
Peter Zijlstra671dec52009-04-06 11:45:02 +02001753 if (handle->nmi) {
Peter Zijlstra79f14642009-04-06 11:45:07 +02001754 handle->counter->pending_wakeup = 1;
Peter Zijlstra671dec52009-04-06 11:45:02 +02001755 perf_pending_queue(&handle->counter->pending,
Peter Zijlstra79f14642009-04-06 11:45:07 +02001756 perf_pending_counter);
Peter Zijlstra671dec52009-04-06 11:45:02 +02001757 } else
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001758 perf_counter_wakeup(handle->counter);
1759}
1760
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001761/*
1762 * Curious locking construct.
1763 *
1764 * We need to ensure a later event doesn't publish a head when a former
1765 * event isn't done writing. However since we need to deal with NMIs we
1766 * cannot fully serialize things.
1767 *
1768 * What we do is serialize between CPUs so we only have to deal with NMI
1769 * nesting on a single CPU.
1770 *
1771 * We only publish the head (and generate a wakeup) when the outer-most
1772 * event completes.
1773 */
1774static void perf_output_lock(struct perf_output_handle *handle)
1775{
1776 struct perf_mmap_data *data = handle->data;
1777 int cpu;
1778
1779 handle->locked = 0;
1780
1781 local_irq_save(handle->flags);
1782 cpu = smp_processor_id();
1783
1784 if (in_nmi() && atomic_read(&data->lock) == cpu)
1785 return;
1786
Peter Zijlstra22c15582009-05-05 17:50:25 +02001787 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001788 cpu_relax();
1789
1790 handle->locked = 1;
1791}
1792
1793static void perf_output_unlock(struct perf_output_handle *handle)
1794{
1795 struct perf_mmap_data *data = handle->data;
1796 int head, cpu;
1797
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001798 data->done_head = data->head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001799
1800 if (!handle->locked)
1801 goto out;
1802
1803again:
1804 /*
1805 * The xchg implies a full barrier that ensures all writes are done
1806 * before we publish the new head, matched by a rmb() in userspace when
1807 * reading this position.
1808 */
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001809 while ((head = atomic_xchg(&data->done_head, 0)))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001810 data->user_page->data_head = head;
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001811
1812 /*
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001813 * NMI can happen here, which means we can miss a done_head update.
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001814 */
1815
Peter Zijlstra22c15582009-05-05 17:50:25 +02001816 cpu = atomic_xchg(&data->lock, -1);
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001817 WARN_ON_ONCE(cpu != smp_processor_id());
1818
1819 /*
1820 * Therefore we have to validate we did not indeed do so.
1821 */
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001822 if (unlikely(atomic_read(&data->done_head))) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001823 /*
1824 * Since we had it locked, we can lock it again.
1825 */
Peter Zijlstra22c15582009-05-05 17:50:25 +02001826 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001827 cpu_relax();
1828
1829 goto again;
1830 }
1831
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001832 if (atomic_xchg(&data->wakeup, 0))
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001833 perf_output_wakeup(handle);
1834out:
1835 local_irq_restore(handle->flags);
1836}
1837
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001838static int perf_output_begin(struct perf_output_handle *handle,
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001839 struct perf_counter *counter, unsigned int size,
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001840 int nmi, int overflow)
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001841{
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001842 struct perf_mmap_data *data;
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001843 unsigned int offset, head;
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001844
Peter Zijlstra2023b352009-05-05 17:50:26 +02001845 /*
1846 * For inherited counters we send all the output towards the parent.
1847 */
1848 if (counter->parent)
1849 counter = counter->parent;
1850
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001851 rcu_read_lock();
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001852 data = rcu_dereference(counter->data);
1853 if (!data)
1854 goto out;
Peter Zijlstra0322cd62009-03-19 20:26:19 +01001855
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001856 handle->data = data;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001857 handle->counter = counter;
1858 handle->nmi = nmi;
1859 handle->overflow = overflow;
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001860
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001861 if (!data->nr_pages)
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001862 goto fail;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001863
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001864 perf_output_lock(handle);
1865
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001866 do {
1867 offset = head = atomic_read(&data->head);
Peter Zijlstrac7138f32009-03-24 13:18:16 +01001868 head += size;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001869 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1870
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001871 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01001872 handle->head = head;
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001873
1874 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
1875 atomic_set(&data->wakeup, 1);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001876
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001877 return 0;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001878
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001879fail:
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001880 perf_output_wakeup(handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001881out:
1882 rcu_read_unlock();
1883
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001884 return -ENOSPC;
1885}
1886
1887static void perf_output_copy(struct perf_output_handle *handle,
1888 void *buf, unsigned int len)
1889{
1890 unsigned int pages_mask;
1891 unsigned int offset;
1892 unsigned int size;
1893 void **pages;
1894
1895 offset = handle->offset;
1896 pages_mask = handle->data->nr_pages - 1;
1897 pages = handle->data->data_pages;
1898
1899 do {
1900 unsigned int page_offset;
1901 int nr;
1902
1903 nr = (offset >> PAGE_SHIFT) & pages_mask;
1904 page_offset = offset & (PAGE_SIZE - 1);
1905 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1906
1907 memcpy(pages[nr] + page_offset, buf, size);
1908
1909 len -= size;
1910 buf += size;
1911 offset += size;
1912 } while (len);
1913
1914 handle->offset = offset;
Peter Zijlstra63e35b22009-03-25 12:30:24 +01001915
1916 WARN_ON_ONCE(handle->offset > handle->head);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001917}
1918
Peter Zijlstra5c148192009-03-25 12:30:23 +01001919#define perf_output_put(handle, x) \
1920 perf_output_copy((handle), &(x), sizeof(x))
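/*
 * Typical emission sequence (perf_counter_output() below is the real
 * thing): reserve space, copy the header and fields, then publish:
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, counter, size, nmi, 1))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, ip);
 *	perf_output_end(&handle);
 */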
1921
Peter Zijlstra78d613e2009-03-30 19:07:11 +02001922static void perf_output_end(struct perf_output_handle *handle)
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001923{
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001924 struct perf_counter *counter = handle->counter;
1925 struct perf_mmap_data *data = handle->data;
1926
1927 int wakeup_events = counter->hw_event.wakeup_events;
Peter Zijlstrac4578102009-04-02 11:12:01 +02001928
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02001929 if (handle->overflow && wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001930 int events = atomic_inc_return(&data->events);
Peter Zijlstrac4578102009-04-02 11:12:01 +02001931 if (events >= wakeup_events) {
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001932 atomic_sub(wakeup_events, &data->events);
Peter Zijlstrac66de4a2009-05-05 17:50:22 +02001933 atomic_set(&data->wakeup, 1);
Peter Zijlstrac4578102009-04-02 11:12:01 +02001934 }
Peter Zijlstrac33a0bc2009-05-01 12:23:16 +02001935 }
1936
1937 perf_output_unlock(handle);
Peter Zijlstrab9cacc72009-03-25 12:30:22 +01001938 rcu_read_unlock();
1939}
1940
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02001941static void perf_counter_output(struct perf_counter *counter,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02001942 int nmi, struct pt_regs *regs, u64 addr)
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001943{
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001944 int ret;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001945 u64 record_type = counter->hw_event.record_type;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001946 struct perf_output_handle handle;
1947 struct perf_event_header header;
1948 u64 ip;
Peter Zijlstra5c148192009-03-25 12:30:23 +01001949 struct {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001950 u32 pid, tid;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001951 } tid_entry;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001952 struct {
1953 u64 event;
1954 u64 counter;
1955 } group_entry;
Peter Zijlstra394ee072009-03-30 19:07:14 +02001956 struct perf_callchain_entry *callchain = NULL;
1957 int callchain_size = 0;
Peter Zijlstra339f7c92009-04-06 11:45:06 +02001958 u64 time;
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001959
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001960 header.type = 0;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001961 header.size = sizeof(header);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01001962
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001963 header.misc = PERF_EVENT_MISC_OVERFLOW;
1964 header.misc |= user_mode(regs) ?
Peter Zijlstra6fab0192009-04-08 15:01:26 +02001965 PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
1966
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001967 if (record_type & PERF_RECORD_IP) {
1968 ip = instruction_pointer(regs);
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001969 header.type |= PERF_RECORD_IP;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001970 header.size += sizeof(ip);
1971 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001972
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001973 if (record_type & PERF_RECORD_TID) {
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001974 /* namespace issues */
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001975 tid_entry.pid = current->group_leader->pid;
1976 tid_entry.tid = current->pid;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001977
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001978 header.type |= PERF_RECORD_TID;
Peter Zijlstra5ed00412009-03-30 19:07:12 +02001979 header.size += sizeof(tid_entry);
1980 }
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01001981
Peter Zijlstra4d855452009-04-08 15:01:32 +02001982 if (record_type & PERF_RECORD_TIME) {
1983 /*
1984 * Maybe do better on x86 and provide cpu_clock_nmi()
1985 */
1986 time = sched_clock();
1987
1988 header.type |= PERF_RECORD_TIME;
1989 header.size += sizeof(u64);
1990 }
1991
Peter Zijlstra78f13e92009-04-08 15:01:33 +02001992 if (record_type & PERF_RECORD_ADDR) {
1993 header.type |= PERF_RECORD_ADDR;
1994 header.size += sizeof(u64);
1995 }
1996
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001997 if (record_type & PERF_RECORD_GROUP) {
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02001998 header.type |= PERF_RECORD_GROUP;
Peter Zijlstra8a057d82009-04-02 11:11:59 +02001999 header.size += sizeof(u64) +
2000 counter->nr_siblings * sizeof(group_entry);
2001 }
2002
2003 if (record_type & PERF_RECORD_CALLCHAIN) {
Peter Zijlstra394ee072009-03-30 19:07:14 +02002004 callchain = perf_callchain(regs);
2005
2006 if (callchain) {
Peter Zijlstra9c03d882009-04-06 11:45:00 +02002007 callchain_size = (1 + callchain->nr) * sizeof(u64);
Peter Zijlstra394ee072009-03-30 19:07:14 +02002008
Peter Zijlstra6b6e5482009-04-08 15:01:27 +02002009 header.type |= PERF_RECORD_CALLCHAIN;
Peter Zijlstra394ee072009-03-30 19:07:14 +02002010 header.size += callchain_size;
2011 }
2012 }
2013
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002014 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002015 if (ret)
2016 return;
Peter Zijlstraea5d20c2009-03-25 12:30:25 +01002017
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002018 perf_output_put(&handle, header);
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002019
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002020 if (record_type & PERF_RECORD_IP)
2021 perf_output_put(&handle, ip);
2022
2023 if (record_type & PERF_RECORD_TID)
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002024 perf_output_put(&handle, tid_entry);
2025
Peter Zijlstra4d855452009-04-08 15:01:32 +02002026 if (record_type & PERF_RECORD_TIME)
2027 perf_output_put(&handle, time);
2028
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002029 if (record_type & PERF_RECORD_ADDR)
2030 perf_output_put(&handle, addr);
2031
Peter Zijlstra2023b352009-05-05 17:50:26 +02002032 /*
2033 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
2034 */
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002035 if (record_type & PERF_RECORD_GROUP) {
2036 struct perf_counter *leader, *sub;
2037 u64 nr = counter->nr_siblings;
2038
2039 perf_output_put(&handle, nr);
2040
2041 leader = counter->group_leader;
2042 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2043 if (sub != counter)
Robert Richter4aeb0b42009-04-29 12:47:03 +02002044 sub->pmu->read(sub);
Peter Zijlstra8a057d82009-04-02 11:11:59 +02002045
2046 group_entry.event = sub->hw_event.config;
2047 group_entry.counter = atomic64_read(&sub->count);
2048
2049 perf_output_put(&handle, group_entry);
2050 }
2051 }
2052
Peter Zijlstra394ee072009-03-30 19:07:14 +02002053 if (callchain)
2054 perf_output_copy(&handle, callchain, callchain_size);
2055
Peter Zijlstra5ed00412009-03-30 19:07:12 +02002056 perf_output_end(&handle);
Peter Zijlstra7b732a72009-03-23 18:22:10 +01002057}
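/*
 * The overflow record thus consists, in order, of: the event header;
 * then, each guarded by its PERF_RECORD_* bit: ip, {pid, tid}, time,
 * addr, the group read-out (a count followed by {event, counter}
 * pairs) and the callchain.
 */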
2058
Peter Zijlstra0322cd62009-03-19 20:26:19 +01002059/*
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02002060 * comm tracking
2061 */
2062
2063struct perf_comm_event {
2064 struct task_struct *task;
2065 char *comm;
2066 int comm_size;
2067
2068 struct {
2069 struct perf_event_header header;
2070
2071 u32 pid;
2072 u32 tid;
2073 } event;
2074};
2075
2076static void perf_counter_comm_output(struct perf_counter *counter,
2077 struct perf_comm_event *comm_event)
2078{
2079 struct perf_output_handle handle;
2080 int size = comm_event->event.header.size;
2081 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2082
2083 if (ret)
2084 return;
2085
2086 perf_output_put(&handle, comm_event->event);
2087 perf_output_copy(&handle, comm_event->comm,
2088 comm_event->comm_size);
2089 perf_output_end(&handle);
2090}
2091
2092static int perf_counter_comm_match(struct perf_counter *counter,
2093 struct perf_comm_event *comm_event)
2094{
2095 if (counter->hw_event.comm &&
2096 comm_event->event.header.type == PERF_EVENT_COMM)
2097 return 1;
2098
2099 return 0;
2100}
2101
2102static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2103 struct perf_comm_event *comm_event)
2104{
2105 struct perf_counter *counter;
2106
2107 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2108 return;
2109
2110 rcu_read_lock();
2111 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2112 if (perf_counter_comm_match(counter, comm_event))
2113 perf_counter_comm_output(counter, comm_event);
2114 }
2115 rcu_read_unlock();
2116}
2117
2118static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2119{
2120 struct perf_cpu_context *cpuctx;
2121 unsigned int size;
2122 char *comm = comm_event->task->comm;
2123
Ingo Molnar888fcee2009-04-09 09:48:22 +02002124 size = ALIGN(strlen(comm)+1, sizeof(u64));
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02002125
2126 comm_event->comm = comm;
2127 comm_event->comm_size = size;
2128
2129 comm_event->event.header.size = sizeof(comm_event->event) + size;
2130
2131 cpuctx = &get_cpu_var(perf_cpu_context);
2132 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2133 put_cpu_var(perf_cpu_context);
2134
2135 perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);
2136}
2137
2138void perf_counter_comm(struct task_struct *task)
2139{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02002140 struct perf_comm_event comm_event;
2141
2142 if (!atomic_read(&nr_comm_tracking))
2143 return;
2144
2145 comm_event = (struct perf_comm_event){
Peter Zijlstra8d1b2d92009-04-08 15:01:30 +02002146 .task = task,
2147 .event = {
2148 .header = { .type = PERF_EVENT_COMM, },
2149 .pid = task->group_leader->pid,
2150 .tid = task->pid,
2151 },
2152 };
2153
2154 perf_counter_comm_event(&comm_event);
2155}
2156
2157/*
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002158 * mmap tracking
2159 */
2160
2161struct perf_mmap_event {
2162 struct file *file;
2163 char *file_name;
2164 int file_size;
2165
2166 struct {
2167 struct perf_event_header header;
2168
2169 u32 pid;
2170 u32 tid;
2171 u64 start;
2172 u64 len;
2173 u64 pgoff;
2174 } event;
2175};
2176
2177static void perf_counter_mmap_output(struct perf_counter *counter,
2178 struct perf_mmap_event *mmap_event)
2179{
2180 struct perf_output_handle handle;
2181 int size = mmap_event->event.header.size;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002182 int ret = perf_output_begin(&handle, counter, size, 0, 0);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002183
2184 if (ret)
2185 return;
2186
2187 perf_output_put(&handle, mmap_event->event);
2188 perf_output_copy(&handle, mmap_event->file_name,
2189 mmap_event->file_size);
Peter Zijlstra78d613e2009-03-30 19:07:11 +02002190 perf_output_end(&handle);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002191}
2192
2193static int perf_counter_mmap_match(struct perf_counter *counter,
2194 struct perf_mmap_event *mmap_event)
2195{
2196 if (counter->hw_event.mmap &&
2197 mmap_event->event.header.type == PERF_EVENT_MMAP)
2198 return 1;
2199
2200 if (counter->hw_event.munmap &&
2201 mmap_event->event.header.type == PERF_EVENT_MUNMAP)
2202 return 1;
2203
2204 return 0;
2205}
2206
2207static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2208 struct perf_mmap_event *mmap_event)
2209{
2210 struct perf_counter *counter;
2211
2212 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2213 return;
2214
2215 rcu_read_lock();
2216 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2217 if (perf_counter_mmap_match(counter, mmap_event))
2218 perf_counter_mmap_output(counter, mmap_event);
2219 }
2220 rcu_read_unlock();
2221}
2222
2223static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2224{
2225 struct perf_cpu_context *cpuctx;
2226 struct file *file = mmap_event->file;
2227 unsigned int size;
2228 char tmp[16];
2229 char *buf = NULL;
2230 char *name;
2231
2232 if (file) {
2233 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2234 if (!buf) {
2235 name = strncpy(tmp, "//enomem", sizeof(tmp));
2236 goto got_name;
2237 }
Peter Zijlstrad3d21c42009-04-09 10:53:46 +02002238 name = d_path(&file->f_path, buf, PATH_MAX);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002239 if (IS_ERR(name)) {
2240 name = strncpy(tmp, "//toolong", sizeof(tmp));
2241 goto got_name;
2242 }
2243 } else {
2244 name = strncpy(tmp, "//anon", sizeof(tmp));
2245 goto got_name;
2246 }
2247
2248got_name:
Ingo Molnar888fcee2009-04-09 09:48:22 +02002249 size = ALIGN(strlen(name)+1, sizeof(u64));
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002250
2251 mmap_event->file_name = name;
2252 mmap_event->file_size = size;
2253
2254 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2255
2256 cpuctx = &get_cpu_var(perf_cpu_context);
2257 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2258 put_cpu_var(perf_cpu_context);
2259
2260 perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
2261
2262 kfree(buf);
2263}
2264
2265void perf_counter_mmap(unsigned long addr, unsigned long len,
2266 unsigned long pgoff, struct file *file)
2267{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02002268 struct perf_mmap_event mmap_event;
2269
2270 if (!atomic_read(&nr_mmap_tracking))
2271 return;
2272
2273 mmap_event = (struct perf_mmap_event){
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002274 .file = file,
2275 .event = {
2276 .header = { .type = PERF_EVENT_MMAP, },
2277 .pid = current->group_leader->pid,
2278 .tid = current->pid,
2279 .start = addr,
2280 .len = len,
2281 .pgoff = pgoff,
2282 },
2283 };
2284
2285 perf_counter_mmap_event(&mmap_event);
2286}
2287
2288void perf_counter_munmap(unsigned long addr, unsigned long len,
2289 unsigned long pgoff, struct file *file)
2290{
Peter Zijlstra9ee318a2009-04-09 10:53:44 +02002291 struct perf_mmap_event mmap_event;
2292
2293 if (!atomic_read(&nr_munmap_tracking))
2294 return;
2295
2296 mmap_event = (struct perf_mmap_event){
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02002297 .file = file,
2298 .event = {
2299 .header = { .type = PERF_EVENT_MUNMAP, },
2300 .pid = current->group_leader->pid,
2301 .tid = current->pid,
2302 .start = addr,
2303 .len = len,
2304 .pgoff = pgoff,
2305 },
2306 };
2307
2308 perf_counter_mmap_event(&mmap_event);
2309}
2310
2311/*
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002312 * Generic counter overflow handling.
2313 */
2314
2315int perf_counter_overflow(struct perf_counter *counter,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002316 int nmi, struct pt_regs *regs, u64 addr)
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002317{
Peter Zijlstra79f14642009-04-06 11:45:07 +02002318 int events = atomic_read(&counter->event_limit);
2319 int ret = 0;
2320
Peter Zijlstra2023b352009-05-05 17:50:26 +02002321 /*
2322 * XXX event_limit might not quite work as expected on inherited
2323 * counters
2324 */
2325
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002326 counter->pending_kill = POLL_IN;
Peter Zijlstra79f14642009-04-06 11:45:07 +02002327 if (events && atomic_dec_and_test(&counter->event_limit)) {
2328 ret = 1;
Peter Zijlstra4c9e2542009-04-06 11:45:09 +02002329 counter->pending_kill = POLL_HUP;
Peter Zijlstra79f14642009-04-06 11:45:07 +02002330 if (nmi) {
2331 counter->pending_disable = 1;
2332 perf_pending_queue(&counter->pending,
2333 perf_pending_counter);
2334 } else
2335 perf_counter_disable(counter);
2336 }
2337
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002338 perf_counter_output(counter, nmi, regs, addr);
Peter Zijlstra79f14642009-04-06 11:45:07 +02002339 return ret;
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002340}
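/*
 * Worked example: a counter armed via PERF_COUNTER_IOC_REFRESH with
 * arg 1 has event_limit == 1, so its first overflow signals POLL_HUP,
 * returns 1 and (self-)disables the counter until it is refreshed
 * again.
 */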
2341
2342/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002343 * Generic software counter infrastructure
2344 */
2345
2346static void perf_swcounter_update(struct perf_counter *counter)
2347{
2348 struct hw_perf_counter *hwc = &counter->hw;
2349 u64 prev, now;
2350 s64 delta;
2351
2352again:
2353 prev = atomic64_read(&hwc->prev_count);
2354 now = atomic64_read(&hwc->count);
2355 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2356 goto again;
2357
2358 delta = now - prev;
2359
2360 atomic64_add(delta, &counter->count);
2361 atomic64_sub(delta, &hwc->period_left);
2362}
2363
2364static void perf_swcounter_set_period(struct perf_counter *counter)
2365{
2366 struct hw_perf_counter *hwc = &counter->hw;
2367 s64 left = atomic64_read(&hwc->period_left);
2368 s64 period = hwc->irq_period;
2369
2370 if (unlikely(left <= -period)) {
2371 left = period;
2372 atomic64_set(&hwc->period_left, left);
2373 }
2374
2375 if (unlikely(left <= 0)) {
2376 left += period;
2377 atomic64_add(period, &hwc->period_left);
2378 }
2379
2380 atomic64_set(&hwc->prev_count, -left);
2381 atomic64_set(&hwc->count, -left);
2382}
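/*
 * Worked example: with irq_period == 100, both ->prev_count and
 * ->count start out at -100; atomic64_add_negative() in
 * perf_swcounter_add() then turns false after 100 single-unit events,
 * which triggers perf_swcounter_overflow() and re-arms the period.
 */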
2383
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002384static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2385{
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002386 enum hrtimer_restart ret = HRTIMER_RESTART;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002387 struct perf_counter *counter;
2388 struct pt_regs *regs;
2389
2390 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
Robert Richter4aeb0b42009-04-29 12:47:03 +02002391 counter->pmu->read(counter);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002392
2393 regs = get_irq_regs();
2394 /*
2395 * In case we exclude kernel IPs or are somehow not in interrupt
2396 * context, provide the next best thing, the user IP.
2397 */
2398 if ((counter->hw_event.exclude_kernel || !regs) &&
2399 !counter->hw_event.exclude_user)
2400 regs = task_pt_regs(current);
2401
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002402 if (regs) {
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002403 if (perf_counter_overflow(counter, 0, regs, 0))
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002404 ret = HRTIMER_NORESTART;
2405 }
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002406
2407 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
2408
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002409 return ret;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002410}
2411
2412static void perf_swcounter_overflow(struct perf_counter *counter,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002413 int nmi, struct pt_regs *regs, u64 addr)
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002414{
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002415 perf_swcounter_update(counter);
2416 perf_swcounter_set_period(counter);
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002417 if (perf_counter_overflow(counter, nmi, regs, addr))
Peter Zijlstraf6c7d5f2009-04-06 11:45:04 +02002418 /* soft-disable the counter */
2419 ;
2420
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002421}
2422
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002423static int perf_swcounter_match(struct perf_counter *counter,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002424 enum perf_event_types type,
2425 u32 event, struct pt_regs *regs)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002426{
2427 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2428 return 0;
2429
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002430 if (perf_event_raw(&counter->hw_event))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002431 return 0;
2432
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002433 if (perf_event_type(&counter->hw_event) != type)
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002434 return 0;
2435
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002436 if (perf_event_id(&counter->hw_event) != event)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002437 return 0;
2438
2439 if (counter->hw_event.exclude_user && user_mode(regs))
2440 return 0;
2441
2442 if (counter->hw_event.exclude_kernel && !user_mode(regs))
2443 return 0;
2444
2445 return 1;
2446}
2447
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002448static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002449 int nmi, struct pt_regs *regs, u64 addr)
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002450{
2451 int neg = atomic64_add_negative(nr, &counter->hw.count);
2452 if (counter->hw.irq_period && !neg)
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002453 perf_swcounter_overflow(counter, nmi, regs, addr);
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002454}
2455
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002456static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002457 enum perf_event_types type, u32 event,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002458 u64 nr, int nmi, struct pt_regs *regs,
2459 u64 addr)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002460{
2461 struct perf_counter *counter;
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002462
Peter Zijlstra01ef09d2009-03-19 20:26:11 +01002463 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002464 return;
2465
Peter Zijlstra592903c2009-03-13 12:21:36 +01002466 rcu_read_lock();
2467 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002468 if (perf_swcounter_match(counter, type, event, regs))
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002469 perf_swcounter_add(counter, nr, nmi, regs, addr);
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002470 }
Peter Zijlstra592903c2009-03-13 12:21:36 +01002471 rcu_read_unlock();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002472}
2473
Peter Zijlstra96f6d442009-03-23 18:22:07 +01002474static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2475{
2476 if (in_nmi())
2477 return &cpuctx->recursion[3];
2478
2479 if (in_irq())
2480 return &cpuctx->recursion[2];
2481
2482 if (in_softirq())
2483 return &cpuctx->recursion[1];
2484
2485 return &cpuctx->recursion[0];
2486}
2487
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002488static void __perf_swcounter_event(enum perf_event_types type, u32 event,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002489 u64 nr, int nmi, struct pt_regs *regs,
2490 u64 addr)
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002491{
2492 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
Peter Zijlstra96f6d442009-03-23 18:22:07 +01002493 int *recursion = perf_swcounter_recursion_context(cpuctx);
2494
2495 if (*recursion)
2496 goto out;
2497
2498 (*recursion)++;
2499 barrier();
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002500
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002501 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
2502 nr, nmi, regs, addr);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002503 if (cpuctx->task_ctx) {
2504 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002505 nr, nmi, regs, addr);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002506 }
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002507
Peter Zijlstra96f6d442009-03-23 18:22:07 +01002508 barrier();
2509 (*recursion)--;
2510
2511out:
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002512 put_cpu_var(perf_cpu_context);
2513}
2514
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002515void
2516perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002517{
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002518 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002519}
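/*
 * Hook sites elsewhere in the kernel feed software events through the
 * wrapper above; a sketch of a page-fault hook, assuming 'regs' and
 * the faulting 'address' are at hand:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
 *
 * with nmi == 0 since fault context may schedule and take locks.
 */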
2520
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002521static void perf_swcounter_read(struct perf_counter *counter)
2522{
2523 perf_swcounter_update(counter);
2524}
2525
2526static int perf_swcounter_enable(struct perf_counter *counter)
2527{
2528 perf_swcounter_set_period(counter);
2529 return 0;
2530}
2531
2532static void perf_swcounter_disable(struct perf_counter *counter)
2533{
2534 perf_swcounter_update(counter);
2535}
2536
Robert Richter4aeb0b42009-04-29 12:47:03 +02002537static const struct pmu perf_ops_generic = {
Peter Zijlstraac17dc82009-03-13 12:21:34 +01002538 .enable = perf_swcounter_enable,
2539 .disable = perf_swcounter_disable,
2540 .read = perf_swcounter_read,
2541};
2542
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002543/*
2544 * Software counter: cpu wall time clock
2545 */
2546
Paul Mackerras9abf8a02009-01-09 16:26:43 +11002547static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2548{
2549 int cpu = raw_smp_processor_id();
2550 s64 prev;
2551 u64 now;
2552
2553 now = cpu_clock(cpu);
2554 prev = atomic64_read(&counter->hw.prev_count);
2555 atomic64_set(&counter->hw.prev_count, now);
2556 atomic64_add(now - prev, &counter->count);
2557}
2558
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002559static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2560{
2561 struct hw_perf_counter *hwc = &counter->hw;
2562 int cpu = raw_smp_processor_id();
2563
2564 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Peter Zijlstra039fc912009-03-13 16:43:47 +01002565 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2566 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002567 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002568 __hrtimer_start_range_ns(&hwc->hrtimer,
2569 ns_to_ktime(hwc->irq_period), 0,
2570 HRTIMER_MODE_REL, 0);
2571 }
2572
2573 return 0;
2574}
2575
Ingo Molnar5c92d122008-12-11 13:21:10 +01002576static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2577{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002578 hrtimer_cancel(&counter->hw.hrtimer);
Paul Mackerras9abf8a02009-01-09 16:26:43 +11002579 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01002580}
2581
2582static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2583{
Paul Mackerras9abf8a02009-01-09 16:26:43 +11002584 cpu_clock_perf_counter_update(counter);
Ingo Molnar5c92d122008-12-11 13:21:10 +01002585}
2586
Robert Richter4aeb0b42009-04-29 12:47:03 +02002587static const struct pmu perf_ops_cpu_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01002588 .enable = cpu_clock_perf_counter_enable,
2589 .disable = cpu_clock_perf_counter_disable,
2590 .read = cpu_clock_perf_counter_read,
Ingo Molnar5c92d122008-12-11 13:21:10 +01002591};
2592
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01002593/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002594 * Software counter: task time clock
2595 */
2596
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002597static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
Ingo Molnarbae43c92008-12-11 14:03:20 +01002598{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002599 u64 prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002600 s64 delta;
Ingo Molnarbae43c92008-12-11 14:03:20 +01002601
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02002602 prev = atomic64_xchg(&counter->hw.prev_count, now);
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002603 delta = now - prev;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002604 atomic64_add(delta, &counter->count);
Ingo Molnarbae43c92008-12-11 14:03:20 +01002605}
2606
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002607static int task_clock_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002608{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002609 struct hw_perf_counter *hwc = &counter->hw;
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02002610 u64 now;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002611
Peter Zijlstraa39d6f22009-04-06 11:45:11 +02002612 now = counter->ctx->time;
2613
2614 atomic64_set(&hwc->prev_count, now);
Peter Zijlstra039fc912009-03-13 16:43:47 +01002615 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2616 hwc->hrtimer.function = perf_swcounter_hrtimer;
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002617 if (hwc->irq_period) {
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002618 __hrtimer_start_range_ns(&hwc->hrtimer,
2619 ns_to_ktime(hwc->irq_period), 0,
2620 HRTIMER_MODE_REL, 0);
2621 }
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002622
2623 return 0;
Ingo Molnar8cb391e2008-12-14 12:22:31 +01002624}
2625
2626static void task_clock_perf_counter_disable(struct perf_counter *counter)
2627{
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002628 hrtimer_cancel(&counter->hw.hrtimer);
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002629 task_clock_perf_counter_update(counter, counter->ctx->time);
2630
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002631}
Ingo Molnaraa9c4c02008-12-17 14:10:57 +01002632
Peter Zijlstrad6d020e2009-03-13 12:21:35 +01002633static void task_clock_perf_counter_read(struct perf_counter *counter)
2634{
Peter Zijlstrae30e08f2009-04-08 15:01:25 +02002635 u64 time;
2636
2637 if (!in_nmi()) {
2638 update_context_time(counter->ctx);
2639 time = counter->ctx->time;
2640 } else {
2641 u64 now = perf_clock();
2642 u64 delta = now - counter->ctx->timestamp;
2643 time = counter->ctx->time + delta;
2644 }
2645
2646 task_clock_perf_counter_update(counter, time);
Ingo Molnarbae43c92008-12-11 14:03:20 +01002647}
2648
Robert Richter4aeb0b42009-04-29 12:47:03 +02002649static const struct pmu perf_ops_task_clock = {
Ingo Molnar76715812008-12-17 14:20:28 +01002650 .enable = task_clock_perf_counter_enable,
2651 .disable = task_clock_perf_counter_disable,
2652 .read = task_clock_perf_counter_read,
Ingo Molnarbae43c92008-12-11 14:03:20 +01002653};
2654
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002655/*
Peter Zijlstra15dbf272009-03-13 12:21:32 +01002656 * Software counter: cpu migrations
2657 */
2658
Paul Mackerras23a185c2009-02-09 22:42:47 +11002659static inline u64 get_cpu_migrations(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01002660{
Paul Mackerras23a185c2009-02-09 22:42:47 +11002661 struct task_struct *curr = counter->ctx->task;
2662
2663 if (curr)
2664 return curr->se.nr_migrations;
2665 return cpu_nr_migrations(smp_processor_id());
Ingo Molnar6c594c22008-12-14 12:34:15 +01002666}
2667
2668static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
2669{
2670 u64 prev, now;
2671 s64 delta;
2672
2673 prev = atomic64_read(&counter->hw.prev_count);
Paul Mackerras23a185c2009-02-09 22:42:47 +11002674 now = get_cpu_migrations(counter);
Ingo Molnar6c594c22008-12-14 12:34:15 +01002675
2676 atomic64_set(&counter->hw.prev_count, now);
2677
2678 delta = now - prev;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002679
2680 atomic64_add(delta, &counter->count);
2681}
2682
2683static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
2684{
2685 cpu_migrations_perf_counter_update(counter);
2686}
2687
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002688static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
Ingo Molnar6c594c22008-12-14 12:34:15 +01002689{
Paul Mackerrasc07c99b2009-02-13 22:10:34 +11002690 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
2691 atomic64_set(&counter->hw.prev_count,
2692 get_cpu_migrations(counter));
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01002693 return 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002694}
2695
2696static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
2697{
2698 cpu_migrations_perf_counter_update(counter);
2699}
2700
Robert Richter4aeb0b42009-04-29 12:47:03 +02002701static const struct pmu perf_ops_cpu_migrations = {
Ingo Molnar76715812008-12-17 14:20:28 +01002702 .enable = cpu_migrations_perf_counter_enable,
2703 .disable = cpu_migrations_perf_counter_disable,
2704 .read = cpu_migrations_perf_counter_read,
Ingo Molnar6c594c22008-12-14 12:34:15 +01002705};
2706
Peter Zijlstrae077df42009-03-19 20:26:17 +01002707#ifdef CONFIG_EVENT_PROFILE
2708void perf_tpcounter_event(int event_id)
2709{
Peter Zijlstrab8e83512009-03-19 20:26:18 +01002710 struct pt_regs *regs = get_irq_regs();
2711
2712 if (!regs)
2713 regs = task_pt_regs(current);
2714
Peter Zijlstra78f13e92009-04-08 15:01:33 +02002715 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
Peter Zijlstrae077df42009-03-19 20:26:17 +01002716}
Steven Whitehouseff7b1b42009-04-15 16:55:05 +01002717EXPORT_SYMBOL_GPL(perf_tpcounter_event);
Peter Zijlstrae077df42009-03-19 20:26:17 +01002718
2719extern int ftrace_profile_enable(int);
2720extern void ftrace_profile_disable(int);
2721
2722static void tp_perf_counter_destroy(struct perf_counter *counter)
2723{
Peter Zijlstraf4a2deb2009-03-23 18:22:06 +01002724 ftrace_profile_disable(perf_event_id(&counter->hw_event));
Peter Zijlstrae077df42009-03-19 20:26:17 +01002725}
2726
Robert Richter4aeb0b42009-04-29 12:47:03 +02002727static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
Peter Zijlstrae077df42009-03-19 20:26:17 +01002728{
	int event_id = perf_event_id(&counter->hw_event);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.irq_period = counter->hw_event.irq_period;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif

static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	const struct pmu *pmu = NULL;
	struct hw_perf_counter *hwc = &counter->hw;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (perf_event_id(&counter->hw_event)) {
	case PERF_COUNT_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
		pmu = &perf_ops_generic;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		if (!counter->hw_event.exclude_kernel)
			pmu = &perf_ops_cpu_migrations;
		break;
	}

	if (pmu)
		hwc->irq_period = hw_event->irq_period;

	return pmu;
}
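
/*
 * Illustrative sketch (not from the original source): how the period
 * clamp above behaves for a cpu-clock software counter. The requested
 * value of 5000 is an example assumption:
 *
 *	hw_event->irq_period = 5000;
 *	pmu = sw_perf_counter_init(counter);
 *	// pmu == &perf_ops_cpu_clock for PERF_COUNT_CPU_CLOCK, and the
 *	// period was raised to the 10000 floor before being copied
 *	// into counter->hw.irq_period.
 */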

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_counter *counter;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->mutex);
	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	INIT_LIST_HEAD(&counter->child_list);

	counter->cpu = cpu;
	counter->hw_event = *hw_event;
	counter->group_leader = group_leader;
	counter->pmu = NULL;
	counter->ctx = ctx;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	pmu = NULL;

	/*
	 * we currently do not support PERF_RECORD_GROUP on inherited counters
	 */
	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
		goto done;

	if (perf_event_raw(hw_event)) {
		pmu = hw_perf_counter_init(counter);
		goto done;
	}

	switch (perf_event_type(hw_event)) {
	case PERF_TYPE_HARDWARE:
		pmu = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_counter_init(counter);
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->pmu = pmu;

	atomic_inc(&nr_counters);
	if (counter->hw_event.mmap)
		atomic_inc(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_inc(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_inc(&nr_comm_tracking);

	return counter;
}

/**
 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
 *
 * @hw_event_uptr: event type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	mutex_unlock(&ctx->mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_context(ctx);

	goto out_fput;
}
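
/*
 * Illustrative userspace sketch (not part of this file): one way to
 * invoke the syscall above. __NR_perf_counter_open is an assumption --
 * the number depends on the architecture's syscall table -- and the
 * event-selection encoding is elided because it depends on the ABI
 * revision of <linux/perf_counter.h>:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_counter.h>
 *
 *	int main(void)
 *	{
 *		struct perf_counter_hw_event hw_event;
 *		int fd;
 *
 *		memset(&hw_event, 0, sizeof(hw_event));
 *		// select an event here; exact bit layout is ABI-dependent
 *		hw_event.irq_period = 0;	// plain counting, no sampling
 *		hw_event.disabled = 0;		// start counting right away
 *
 *		fd = syscall(__NR_perf_counter_open, &hw_event,
 *			     0,		// pid 0: count the current task
 *			     -1,	// cpu -1: on any cpu
 *			     -1,	// group_fd -1: no group leader
 *			     0);	// flags: must be 0 for now
 *		return fd < 0;
 *	}
 */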

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	ctx->task = task;
}

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which is guaranteed to have a filp that we can use as
	 * the reference count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;

	/*
	 * Link it up in the child's context:
	 */
	child_counter->task = child;
	add_counter_to_ctx(child_counter, child_ctx);

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	mutex_lock(&parent_counter->mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_counter_{en,dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	mutex_unlock(&parent_counter->mutex);

	return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 parent_val, child_val;

	parent_val = atomic64_read(&parent_counter->count);
	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	mutex_lock(&parent_counter->mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}

static void
__perf_counter_exit_task(struct task_struct *child,
			 struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;
	struct perf_counter *sub, *tmp;

	/*
	 * If we do not self-reap then we have to wait for the
	 * child task to unschedule (it will happen for sure),
	 * so that its counter is at its final count. (This
	 * condition triggers rarely - child tasks usually get
	 * off their CPU before the parent has a chance to
	 * get this far into the reaping action)
	 */
	if (child != current) {
		wait_task_inactive(child, 0);
		list_del_init(&child_counter->list_entry);
		update_counter_times(child_counter);
	} else {
		struct perf_cpu_context *cpuctx;
		unsigned long flags;
		u64 perf_flags;

		/*
		 * Disable and unlink this counter.
		 *
		 * Be careful about zapping the list - IRQ/NMI context
		 * could still be processing it:
		 */
		local_irq_save(flags);
		perf_flags = hw_perf_save_disable();

		cpuctx = &__get_cpu_var(perf_cpu_context);

		group_sched_out(child_counter, cpuctx, child_ctx);
		update_counter_times(child_counter);

		list_del_init(&child_counter->list_entry);

		child_ctx->nr_counters--;

		hw_perf_restore(perf_flags);
		local_irq_restore(flags);
	}

	parent_counter = child_counter->parent;
	/*
	 * It can happen that the parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
					 list_entry) {
			if (sub->parent) {
				sync_child_counter(sub, sub->parent);
				free_counter(sub);
			}
		}
		free_counter(child_counter);
	}
}

/*
 * When a child task exits, feed back counter values to parent counters.
 *
 * Note: we may be running in child context, but the PID is not hashed
 * anymore so new counters will not be added.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;

	child_ctx = &child->perf_counter_ctx;

	if (likely(!child_ctx->nr_counters))
		return;

	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child, child_counter, child_ctx);
}

/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;

	child_ctx = &child->perf_counter_ctx;
	parent_ctx = &parent->perf_counter_ctx;

	__perf_counter_init_context(child_ctx, child);

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning:
	 */

	if (likely(!parent_ctx->nr_counters))
		return;

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
		if (!counter->hw_event.inherit)
			continue;

		if (inherit_group(counter, parent,
				  parent_ctx, child, child_ctx))
			break;
	}

	mutex_unlock(&parent_ctx->mutex);
}

static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call = perf_cpu_notify,
};

void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}

static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs = perfclass_attrs,
	.name = "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);
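
/*
 * Illustrative sketch: once the attribute group above is registered,
 * the two knobs should show up under the cpu sysdev class. The path
 * below is an assumption based on the conventional sysdev layout:
 *
 *	# cat /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	0
 *	# echo 2 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	# echo 0 > /sys/devices/system/cpu/perf_counters/overcommit
 */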