/*
 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU events:
 */
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

/*
 * Lock for (sysadmin-configurable) event reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	return NULL;
}

void __weak hw_perf_disable(void)	{ barrier(); }
void __weak hw_perf_enable(void)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	return 0;
}

void __weak perf_event_print_debug(void)	{ }

static DEFINE_PER_CPU(int, perf_disable_count);

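/*
 * perf_disable()/perf_enable() pairs nest via the per-cpu
 * perf_disable_count: only the outermost perf_disable() reaches
 * hw_perf_disable(), and only the matching outermost perf_enable()
 * re-enables the hardware via hw_perf_enable().
 */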
void perf_disable(void)
{
	if (!__get_cpu_var(perf_disable_count)++)
		hw_perf_disable();
}

void perf_enable(void)
{
	if (!--__get_cpu_var(perf_disable_count))
		hw_perf_enable();
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

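/*
 * Detach a context from its parent clone: drop the reference we
 * hold on the parent context and forget about it, turning this
 * context back into a standalone, non-inherited one.
 */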
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

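/*
 * Time source for context and event timestamps: the per-cpu
 * cpu_clock() of the local CPU.
 */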
static inline u64 perf_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}

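/*
 * Pick the group list an event belongs on: pinned events go on
 * ctx->pinned_groups, everything else on ctx->flexible_groups.
 */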
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *group_leader = event->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling event,
	 * add it straight to the context's event list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	} else {
		if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
		    !is_software_event(event))
			group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

		list_add_tail(&event->group_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *sibling, *tmp;

	if (list_empty(&event->group_entry))
		return;
	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&event->group_entry);
	list_del_rcu(&event->event_entry);

	if (event->group_leader != event)
		event->group_leader->nr_siblings--;

	update_event_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		struct list_head *list;

		list = ctx_group_list(event, ctx);
		list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}

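/*
 * Stop a single event: mark it INACTIVE (or OFF if a disable was
 * pending), record the stop time, tell the pmu to disable it and
 * update the cpu/context active counts. Called with ctx->lock held.
 */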
static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->disable(event);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;

	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level.
	 */
	perf_disable();

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task events with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_events - ctx->nr_events,
			    perf_max_events - perf_reserved_percpu);
	}

	perf_enable();
	raw_spin_unlock(&ctx->lock);
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so
	 * we can remove the event safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}

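/*
 * Put an event into ACTIVE state and enable it in the pmu.
 * Returns -EAGAIN when the pmu rejects it, in which case the
 * event is rolled back to INACTIVE.
 */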
static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->enable(event)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

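/*
 * Schedule a whole group in: the leader first, then all siblings.
 * A group goes on as one unit; if any member fails, every member
 * already scheduled is backed out again and -EAGAIN is returned.
 */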
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group;
	int ret;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
	if (ret)
		return ret < 0 ? ret : 0;

	if (event_sched_in(group_event, cpuctx, ctx))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level. NOP for non NMI based events.
	 */
	perf_disable();

	add_event_to_ctx(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	perf_enable();

	raw_spin_unlock(&ctx->lock);
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so
	 * we can add the event safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry)
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
}

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
		perf_enable();
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

 unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

 retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

 out:
	raw_spin_unlock_irq(&ctx->lock);
}

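/*
 * Add @refresh to event->event_limit and (re-)enable the event.
 * Not supported on inherited events.
 */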
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

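/*
 * Deactivate a context: schedule out the pinned and/or flexible
 * groups selected by @event_type and mark the context inactive.
 */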
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (!ctx->nr_active)
		goto out_enable;

	if (event_type & EVENT_PINNED)
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

	if (event_type & EVENT_FLEXIBLE)
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

 out_enable:
	perf_enable();
 out:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_event->count);
	value = atomic64_xchg(&event->count, value);
	atomic64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp = next_ctx;
			next->perf_event_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}

static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
	task_ctx_sched_out(ctx, EVENT_ALL);
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw))
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
{
	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

	perf_enable();
 out:
	raw_spin_unlock(&ctx->lock);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);
}

static void task_ctx_sched_in(struct task_struct *task,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;
}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;

	if (cpuctx->task_ctx == ctx)
		return;

	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	return div64_u64(dividend, divisor);
}

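/*
 * Fallback wrappers: pmus that do not implement ->stop()/->start()
 * are driven through ->disable()/->enable() instead.
 */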
static void perf_event_stop(struct perf_event *event)
{
	if (!event->pmu->stop)
		return event->pmu->disable(event);

	return event->pmu->stop(event);
}

static int perf_event_start(struct perf_event *event)
{
	if (!event->pmu->start)
		return event->pmu->enable(event);

	return event->pmu->start(event);
}

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (atomic64_read(&hwc->period_left) > 8*sample_period) {
		perf_disable();
		perf_event_stop(event);
		atomic64_set(&hwc->period_left, 0);
		perf_event_start(event);
		perf_enable();
	}
}

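/*
 * Called from the timer tick: unthrottle events that hit their
 * interrupt limit and, for freq-based events, re-estimate the
 * sample period from the number of events counted this tick.
 */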
static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			perf_disable();
			event->pmu->unthrottle(event);
			perf_enable();
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		perf_disable();
		event->pmu->read(event);
		now = atomic64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, TICK_NSEC, delta);
		perf_enable();
	}
	raw_spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/* Rotate the first entry of the non-pinned groups to the end */
	list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}

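/*
 * Timer tick entry point: adjust frequencies and, when a context
 * has more events than can be active at once, rotate the flexible
 * groups so every event gets its turn on the PMU.
 */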
Peter Zijlstra49f47432009-12-27 11:51:52 +01001561void perf_event_task_tick(struct task_struct *curr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001562{
1563 struct perf_cpu_context *cpuctx;
1564 struct perf_event_context *ctx;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001565 int rotate = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001566
1567 if (!atomic_read(&nr_events))
1568 return;
1569
Peter Zijlstra49f47432009-12-27 11:51:52 +01001570 cpuctx = &__get_cpu_var(perf_cpu_context);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001571 if (cpuctx->ctx.nr_events &&
1572 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1573 rotate = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001574
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001575 ctx = curr->perf_event_ctxp;
1576 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1577 rotate = 1;
Peter Zijlstra9717e6c2010-01-28 13:57:44 +01001578
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001579 perf_ctx_adjust_freq(&cpuctx->ctx);
1580 if (ctx)
1581 perf_ctx_adjust_freq(ctx);
1582
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001583 if (!rotate)
1584 return;
1585
1586 perf_disable();
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001587 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001588 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001589 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001590
1591 rotate_ctx(&cpuctx->ctx);
1592 if (ctx)
1593 rotate_ctx(ctx);
1594
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001595 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001596 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001597 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
Peter Zijlstra9717e6c2010-01-28 13:57:44 +01001598 perf_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001599}
1600
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001601static int event_enable_on_exec(struct perf_event *event,
1602 struct perf_event_context *ctx)
1603{
1604 if (!event->attr.enable_on_exec)
1605 return 0;
1606
1607 event->attr.enable_on_exec = 0;
1608 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1609 return 0;
1610
1611 __perf_event_mark_enabled(event, ctx);
1612
1613 return 1;
1614}
1615
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001616/*
1617 * Enable all of a task's events that have been marked enable-on-exec.
1618 * This expects task == current.
1619 */
1620static void perf_event_enable_on_exec(struct task_struct *task)
1621{
1622 struct perf_event_context *ctx;
1623 struct perf_event *event;
1624 unsigned long flags;
1625 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001626 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001627
1628 local_irq_save(flags);
1629 ctx = task->perf_event_ctxp;
1630 if (!ctx || !ctx->nr_events)
1631 goto out;
1632
1633 __perf_event_task_sched_out(ctx);
1634
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001635 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001636
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001637 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1638 ret = event_enable_on_exec(event, ctx);
1639 if (ret)
1640 enabled = 1;
1641 }
1642
1643 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1644 ret = event_enable_on_exec(event, ctx);
1645 if (ret)
1646 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001647 }
1648
1649 /*
1650 * Unclone this context if we enabled any event.
1651 */
1652 if (enabled)
1653 unclone_ctx(ctx);
1654
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001655 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001656
Peter Zijlstra49f47432009-12-27 11:51:52 +01001657 perf_event_task_sched_in(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001658 out:
1659 local_irq_restore(flags);
1660}
1661
1662/*
1663 * Cross CPU call to read the hardware event
1664 */
1665static void __perf_event_read(void *info)
1666{
1667 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1668 struct perf_event *event = info;
1669 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001670
1671 /*
1672 * If this is a task context, we need to check whether it is
1673 * the current task context of this cpu. If not, it has been
1674 * scheduled out before the smp call arrived. In that case
1675 * event->count would have been updated to a recent sample
1676 * when the event was scheduled out.
1677 */
1678 if (ctx->task && cpuctx->task_ctx != ctx)
1679 return;
1680
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001681 raw_spin_lock(&ctx->lock);
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001682 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001683 update_event_times(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001684 raw_spin_unlock(&ctx->lock);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001685
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001686 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001687}
1688
1689static u64 perf_event_read(struct perf_event *event)
1690{
1691 /*
1692 * If event is enabled and currently active on a CPU, update the
1693 * value in the event structure:
1694 */
1695 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1696 smp_call_function_single(event->oncpu,
1697 __perf_event_read, event, 1);
1698 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001699 struct perf_event_context *ctx = event->ctx;
1700 unsigned long flags;
1701
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001702 raw_spin_lock_irqsave(&ctx->lock, flags);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001703 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001704 update_event_times(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001705 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001706 }
1707
1708 return atomic64_read(&event->count);
1709}
1710
1711/*
1712 * Initialize the perf_event context in a task_struct:
1713 */
1714static void
1715__perf_event_init_context(struct perf_event_context *ctx,
1716 struct task_struct *task)
1717{
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001718 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001719 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001720 INIT_LIST_HEAD(&ctx->pinned_groups);
1721 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001722 INIT_LIST_HEAD(&ctx->event_list);
1723 atomic_set(&ctx->refcount, 1);
1724 ctx->task = task;
1725}
1726
1727static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1728{
1729 struct perf_event_context *ctx;
1730 struct perf_cpu_context *cpuctx;
1731 struct task_struct *task;
1732 unsigned long flags;
1733 int err;
1734
Peter Zijlstraf4c41762009-12-16 17:55:54 +01001735 if (pid == -1 && cpu != -1) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001736 /* Must be root to operate on a CPU event: */
1737 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1738 return ERR_PTR(-EACCES);
1739
Paul Mackerras0f624e72009-12-15 19:40:32 +11001740 if (cpu < 0 || cpu >= nr_cpumask_bits)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001741 return ERR_PTR(-EINVAL);
1742
1743 /*
1744 * We could be clever and allow attaching an event to an
1745 * offline CPU and activate it when the CPU comes up, but
1746 * that's for later.
1747 */
Rusty Russellf6325e32009-12-17 11:43:08 -06001748 if (!cpu_online(cpu))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001749 return ERR_PTR(-ENODEV);
1750
1751 cpuctx = &per_cpu(perf_cpu_context, cpu);
1752 ctx = &cpuctx->ctx;
1753 get_ctx(ctx);
1754
1755 return ctx;
1756 }
1757
1758 rcu_read_lock();
1759 if (!pid)
1760 task = current;
1761 else
1762 task = find_task_by_vpid(pid);
1763 if (task)
1764 get_task_struct(task);
1765 rcu_read_unlock();
1766
1767 if (!task)
1768 return ERR_PTR(-ESRCH);
1769
1770 /*
1771 * Can't attach events to a dying task.
1772 */
1773 err = -ESRCH;
1774 if (task->flags & PF_EXITING)
1775 goto errout;
1776
1777 /* Reuse ptrace permission checks for now. */
1778 err = -EACCES;
1779 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1780 goto errout;
1781
1782 retry:
1783 ctx = perf_lock_task_context(task, &flags);
1784 if (ctx) {
1785 unclone_ctx(ctx);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001786 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001787 }
1788
1789 if (!ctx) {
Xiao Guangrongaa5452d2009-12-09 11:28:13 +08001790 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001791 err = -ENOMEM;
1792 if (!ctx)
1793 goto errout;
1794 __perf_event_init_context(ctx, task);
1795 get_ctx(ctx);
1796 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1797 /*
1798 * We raced with some other task; use
1799 * the context they set.
1800 */
1801 kfree(ctx);
1802 goto retry;
1803 }
1804 get_task_struct(task);
1805 }
1806
1807 put_task_struct(task);
1808 return ctx;
1809
1810 errout:
1811 put_task_struct(task);
1812 return ERR_PTR(err);
1813}
1814
Li Zefan6fb29152009-10-15 11:21:42 +08001815static void perf_event_free_filter(struct perf_event *event);
1816
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001817static void free_event_rcu(struct rcu_head *head)
1818{
1819 struct perf_event *event;
1820
1821 event = container_of(head, struct perf_event, rcu_head);
1822 if (event->ns)
1823 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08001824 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001825 kfree(event);
1826}
1827
1828static void perf_pending_sync(struct perf_event *event);
1829
1830static void free_event(struct perf_event *event)
1831{
1832 perf_pending_sync(event);
1833
1834 if (!event->parent) {
1835 atomic_dec(&nr_events);
1836 if (event->attr.mmap)
1837 atomic_dec(&nr_mmap_events);
1838 if (event->attr.comm)
1839 atomic_dec(&nr_comm_events);
1840 if (event->attr.task)
1841 atomic_dec(&nr_task_events);
1842 }
1843
1844 if (event->output) {
1845 fput(event->output->filp);
1846 event->output = NULL;
1847 }
1848
1849 if (event->destroy)
1850 event->destroy(event);
1851
1852 put_ctx(event->ctx);
1853 call_rcu(&event->rcu_head, free_event_rcu);
1854}
1855
Arjan van de Venfb0459d2009-09-25 12:25:56 +02001856int perf_event_release_kernel(struct perf_event *event)
1857{
1858 struct perf_event_context *ctx = event->ctx;
1859
1860 WARN_ON_ONCE(ctx->parent_ctx);
1861 mutex_lock(&ctx->mutex);
1862 perf_event_remove_from_context(event);
1863 mutex_unlock(&ctx->mutex);
1864
1865 mutex_lock(&event->owner->perf_event_mutex);
1866 list_del_init(&event->owner_entry);
1867 mutex_unlock(&event->owner->perf_event_mutex);
1868 put_task_struct(event->owner);
1869
1870 free_event(event);
1871
1872 return 0;
1873}
1874EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1875
Peter Zijlstraa66a3052009-11-23 11:37:23 +01001876/*
1877 * Called when the last reference to the file is gone.
1878 */
1879static int perf_release(struct inode *inode, struct file *file)
1880{
1881 struct perf_event *event = file->private_data;
1882
1883 file->private_data = NULL;
1884
1885 return perf_event_release_kernel(event);
1886}
1887
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001888static int perf_event_read_size(struct perf_event *event)
1889{
1890 int entry = sizeof(u64); /* value */
1891 int size = 0;
1892 int nr = 1;
1893
1894 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1895 size += sizeof(u64);
1896
1897 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1898 size += sizeof(u64);
1899
1900 if (event->attr.read_format & PERF_FORMAT_ID)
1901 entry += sizeof(u64);
1902
1903 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1904 nr += event->group_leader->nr_siblings;
1905 size += sizeof(u64);
1906 }
1907
1908 size += entry * nr;
1909
1910 return size;
1911}
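/*
 * Illustrative example (not from this file): with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * and a leader with two siblings, the read buffer is laid out as:
 *
 *	{ u64 nr;			(= 3)
 *	  u64 time_enabled;
 *	  { u64 value, id; } cntr[3]; }
 *
 * giving size = 2*sizeof(u64) + 3 * 2*sizeof(u64) = 64 bytes.
 */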
1912
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001913u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001914{
1915 struct perf_event *child;
1916 u64 total = 0;
1917
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001918 *enabled = 0;
1919 *running = 0;
1920
Peter Zijlstra6f105812009-11-20 22:19:56 +01001921 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001922 total += perf_event_read(event);
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001923 *enabled += event->total_time_enabled +
1924 atomic64_read(&event->child_total_time_enabled);
1925 *running += event->total_time_running +
1926 atomic64_read(&event->child_total_time_running);
1927
1928 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001929 total += perf_event_read(child);
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001930 *enabled += child->total_time_enabled;
1931 *running += child->total_time_running;
1932 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01001933 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001934
1935 return total;
1936}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02001937EXPORT_SYMBOL_GPL(perf_event_read_value);
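/*
 * Illustrative in-kernel usage (a sketch, assuming an event obtained
 * via perf_event_create_kernel_counter()):
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 * count can then be scaled by enabled/running to estimate what the
 * event would have counted with uninterrupted access to the PMU.
 */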
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001938
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001939static int perf_event_read_group(struct perf_event *event,
1940 u64 read_format, char __user *buf)
1941{
1942 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01001943 int n = 0, size = 0, ret = -EFAULT;
1944 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01001945 u64 values[5];
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001946 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01001947
Peter Zijlstra6f105812009-11-20 22:19:56 +01001948 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001949 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001950
1951 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001952 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1953 values[n++] = enabled;
1954 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1955 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01001956 values[n++] = count;
1957 if (read_format & PERF_FORMAT_ID)
1958 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001959
1960 size = n * sizeof(u64);
1961
1962 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01001963 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001964
Peter Zijlstra6f105812009-11-20 22:19:56 +01001965 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001966
1967 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01001968 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001969
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001970 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01001971 if (read_format & PERF_FORMAT_ID)
1972 values[n++] = primary_event_id(sub);
1973
1974 size = n * sizeof(u64);
1975
Stephane Eranian184d3da2009-11-23 21:40:49 -08001976 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01001977 ret = -EFAULT;
1978 goto unlock;
1979 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01001980
1981 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001982 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01001983unlock:
1984 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001985
Peter Zijlstraabf48682009-11-20 22:19:49 +01001986 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001987}
1988
1989static int perf_event_read_one(struct perf_event *event,
1990 u64 read_format, char __user *buf)
1991{
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001992 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001993 u64 values[4];
1994 int n = 0;
1995
Peter Zijlstra59ed446f2009-11-20 22:19:55 +01001996 values[n++] = perf_event_read_value(event, &enabled, &running);
1997 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1998 values[n++] = enabled;
1999 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2000 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002001 if (read_format & PERF_FORMAT_ID)
2002 values[n++] = primary_event_id(event);
2003
2004 if (copy_to_user(buf, values, n * sizeof(u64)))
2005 return -EFAULT;
2006
2007 return n * sizeof(u64);
2008}
2009
2010/*
2011 * Read the performance event - simple non-blocking version for now
2012 */
2013static ssize_t
2014perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2015{
2016 u64 read_format = event->attr.read_format;
2017 int ret;
2018
2019 /*
2020 * Return end-of-file for a read on an event that is in
2021 * error state (i.e. because it was pinned but it couldn't be
2022 * scheduled onto the CPU at some point).
2023 */
2024 if (event->state == PERF_EVENT_STATE_ERROR)
2025 return 0;
2026
2027 if (count < perf_event_read_size(event))
2028 return -ENOSPC;
2029
2030 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002031 if (read_format & PERF_FORMAT_GROUP)
2032 ret = perf_event_read_group(event, read_format, buf);
2033 else
2034 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002035
2036 return ret;
2037}
2038
2039static ssize_t
2040perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2041{
2042 struct perf_event *event = file->private_data;
2043
2044 return perf_read_hw(event, buf, count);
2045}
2046
2047static unsigned int perf_poll(struct file *file, poll_table *wait)
2048{
2049 struct perf_event *event = file->private_data;
2050 struct perf_mmap_data *data;
2051 unsigned int events = POLLHUP;
2052
2053 rcu_read_lock();
2054 data = rcu_dereference(event->data);
2055 if (data)
2056 events = atomic_xchg(&data->poll, 0);
2057 rcu_read_unlock();
2058
2059 poll_wait(file, &event->waitq, wait);
2060
2061 return events;
2062}
2063
2064static void perf_event_reset(struct perf_event *event)
2065{
2066 (void)perf_event_read(event);
2067 atomic64_set(&event->count, 0);
2068 perf_event_update_userpage(event);
2069}
2070
2071/*
2072 * Holding the top-level event's child_mutex means that any
2073 * descendant process that has inherited this event will block
2074 * in sync_child_event if it goes to exit, thus satisfying the
2075 * task existence requirements of perf_event_enable/disable.
2076 */
2077static void perf_event_for_each_child(struct perf_event *event,
2078 void (*func)(struct perf_event *))
2079{
2080 struct perf_event *child;
2081
2082 WARN_ON_ONCE(event->ctx->parent_ctx);
2083 mutex_lock(&event->child_mutex);
2084 func(event);
2085 list_for_each_entry(child, &event->child_list, child_list)
2086 func(child);
2087 mutex_unlock(&event->child_mutex);
2088}
2089
2090static void perf_event_for_each(struct perf_event *event,
2091 void (*func)(struct perf_event *))
2092{
2093 struct perf_event_context *ctx = event->ctx;
2094 struct perf_event *sibling;
2095
2096 WARN_ON_ONCE(ctx->parent_ctx);
2097 mutex_lock(&ctx->mutex);
2098 event = event->group_leader;
2099
2100 perf_event_for_each_child(event, func);
2101 func(event);
2102 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2103 perf_event_for_each_child(event, func);
2104 mutex_unlock(&ctx->mutex);
2105}
2106
2107static int perf_event_period(struct perf_event *event, u64 __user *arg)
2108{
2109 struct perf_event_context *ctx = event->ctx;
2110 unsigned long size;
2111 int ret = 0;
2112 u64 value;
2113
2114 if (!event->attr.sample_period)
2115 return -EINVAL;
2116
2117 size = copy_from_user(&value, arg, sizeof(value));
2118 if (size != 0)
2119 return -EFAULT;
2120
2121 if (!value)
2122 return -EINVAL;
2123
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002124 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002125 if (event->attr.freq) {
2126 if (value > sysctl_perf_event_sample_rate) {
2127 ret = -EINVAL;
2128 goto unlock;
2129 }
2130
2131 event->attr.sample_freq = value;
2132 } else {
2133 event->attr.sample_period = value;
2134 event->hw.sample_period = value;
2135 }
2136unlock:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002137 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002138
2139 return ret;
2140}
2141
Li Zefan6fb29152009-10-15 11:21:42 +08002142static int perf_event_set_output(struct perf_event *event, int output_fd);
2143static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002144
2145static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2146{
2147 struct perf_event *event = file->private_data;
2148 void (*func)(struct perf_event *);
2149 u32 flags = arg;
2150
2151 switch (cmd) {
2152 case PERF_EVENT_IOC_ENABLE:
2153 func = perf_event_enable;
2154 break;
2155 case PERF_EVENT_IOC_DISABLE:
2156 func = perf_event_disable;
2157 break;
2158 case PERF_EVENT_IOC_RESET:
2159 func = perf_event_reset;
2160 break;
2161
2162 case PERF_EVENT_IOC_REFRESH:
2163 return perf_event_refresh(event, arg);
2164
2165 case PERF_EVENT_IOC_PERIOD:
2166 return perf_event_period(event, (u64 __user *)arg);
2167
2168 case PERF_EVENT_IOC_SET_OUTPUT:
2169 return perf_event_set_output(event, arg);
2170
Li Zefan6fb29152009-10-15 11:21:42 +08002171 case PERF_EVENT_IOC_SET_FILTER:
2172 return perf_event_set_filter(event, (void __user *)arg);
2173
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002174 default:
2175 return -ENOTTY;
2176 }
2177
2178 if (flags & PERF_IOC_FLAG_GROUP)
2179 perf_event_for_each(event, func);
2180 else
2181 perf_event_for_each_child(event, func);
2182
2183 return 0;
2184}
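/*
 * Illustrative user-space usage (a sketch; fd is assumed to come from
 * sys_perf_event_open()):
 *
 *	u64 period = 100000;
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */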
2185
2186int perf_event_task_enable(void)
2187{
2188 struct perf_event *event;
2189
2190 mutex_lock(&current->perf_event_mutex);
2191 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2192 perf_event_for_each_child(event, perf_event_enable);
2193 mutex_unlock(&current->perf_event_mutex);
2194
2195 return 0;
2196}
2197
2198int perf_event_task_disable(void)
2199{
2200 struct perf_event *event;
2201
2202 mutex_lock(&current->perf_event_mutex);
2203 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2204 perf_event_for_each_child(event, perf_event_disable);
2205 mutex_unlock(&current->perf_event_mutex);
2206
2207 return 0;
2208}
2209
2210#ifndef PERF_EVENT_INDEX_OFFSET
2211# define PERF_EVENT_INDEX_OFFSET 0
2212#endif
2213
2214static int perf_event_index(struct perf_event *event)
2215{
2216 if (event->state != PERF_EVENT_STATE_ACTIVE)
2217 return 0;
2218
2219 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2220}
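/*
 * The +1 bias exists because user-space reads ->index from the mmap
 * control page and treats 0 as "counter cannot be read directly"; an
 * active event on hardware counter 0 must therefore report index 1.
 */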
2221
2222/*
2223 * Callers need to ensure there can be no nesting of this function, otherwise
2224 * the seqlock logic goes bad. We cannot serialize this because the arch
2225 * code calls this from NMI context.
2226 */
2227void perf_event_update_userpage(struct perf_event *event)
2228{
2229 struct perf_event_mmap_page *userpg;
2230 struct perf_mmap_data *data;
2231
2232 rcu_read_lock();
2233 data = rcu_dereference(event->data);
2234 if (!data)
2235 goto unlock;
2236
2237 userpg = data->user_page;
2238
2239 /*
2240 * Disable preemption so as not to let the corresponding user-space
2241 * spin too long if we get preempted.
2242 */
2243 preempt_disable();
2244 ++userpg->lock;
2245 barrier();
2246 userpg->index = perf_event_index(event);
2247 userpg->offset = atomic64_read(&event->count);
2248 if (event->state == PERF_EVENT_STATE_ACTIVE)
2249 userpg->offset -= atomic64_read(&event->hw.prev_count);
2250
2251 userpg->time_enabled = event->total_time_enabled +
2252 atomic64_read(&event->child_total_time_enabled);
2253
2254 userpg->time_running = event->total_time_running +
2255 atomic64_read(&event->child_total_time_running);
2256
2257 barrier();
2258 ++userpg->lock;
2259 preempt_enable();
2260unlock:
2261 rcu_read_unlock();
2262}
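/*
 * Illustrative user-space counterpart (a sketch, not from this file):
 * the ->lock increments above act as a seqcount, so a reader of the
 * control page would do something like (pmc_read() standing in for
 * the arch-specific way of reading hardware counter idx - 1):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += pmc_read(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */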
2263
Peter Zijlstra906010b2009-09-21 16:08:49 +02002264static unsigned long perf_data_size(struct perf_mmap_data *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002265{
Peter Zijlstra906010b2009-09-21 16:08:49 +02002266 return data->nr_pages << (PAGE_SHIFT + data->data_order);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002267}
2268
Peter Zijlstra906010b2009-09-21 16:08:49 +02002269#ifndef CONFIG_PERF_USE_VMALLOC
2270
2271/*
2272 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2273 */
2274
2275static struct page *
2276perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2277{
2278 if (pgoff > data->nr_pages)
2279 return NULL;
2280
2281 if (pgoff == 0)
2282 return virt_to_page(data->user_page);
2283
2284 return virt_to_page(data->data_pages[pgoff - 1]);
2285}
2286
2287static struct perf_mmap_data *
2288perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002289{
2290 struct perf_mmap_data *data;
2291 unsigned long size;
2292 int i;
2293
2294 WARN_ON(atomic_read(&event->mmap_count));
2295
2296 size = sizeof(struct perf_mmap_data);
2297 size += nr_pages * sizeof(void *);
2298
2299 data = kzalloc(size, GFP_KERNEL);
2300 if (!data)
2301 goto fail;
2302
2303 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2304 if (!data->user_page)
2305 goto fail_user_page;
2306
2307 for (i = 0; i < nr_pages; i++) {
2308 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2309 if (!data->data_pages[i])
2310 goto fail_data_pages;
2311 }
2312
Peter Zijlstra906010b2009-09-21 16:08:49 +02002313 data->data_order = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002314 data->nr_pages = nr_pages;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002315
Peter Zijlstra906010b2009-09-21 16:08:49 +02002316 return data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002317
2318fail_data_pages:
2319 for (i--; i >= 0; i--)
2320 free_page((unsigned long)data->data_pages[i]);
2321
2322 free_page((unsigned long)data->user_page);
2323
2324fail_user_page:
2325 kfree(data);
2326
2327fail:
Peter Zijlstra906010b2009-09-21 16:08:49 +02002328 return NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002329}
2330
2331static void perf_mmap_free_page(unsigned long addr)
2332{
2333 struct page *page = virt_to_page((void *)addr);
2334
2335 page->mapping = NULL;
2336 __free_page(page);
2337}
2338
Peter Zijlstra906010b2009-09-21 16:08:49 +02002339static void perf_mmap_data_free(struct perf_mmap_data *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002340{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002341 int i;
2342
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002343 perf_mmap_free_page((unsigned long)data->user_page);
2344 for (i = 0; i < data->nr_pages; i++)
2345 perf_mmap_free_page((unsigned long)data->data_pages[i]);
Kristian Høgsbergec70ccd2009-12-01 15:05:01 -05002346 kfree(data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002347}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002348
Peter Zijlstra906010b2009-09-21 16:08:49 +02002349#else
2350
2351/*
2352 * Back perf_mmap() with vmalloc memory.
2353 *
2354 * Required for architectures that have d-cache aliasing issues.
2355 */
2356
2357static struct page *
2358perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2359{
2360 if (pgoff > (1UL << data->data_order))
2361 return NULL;
2362
2363 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2364}
2365
2366static void perf_mmap_unmark_page(void *addr)
2367{
2368 struct page *page = vmalloc_to_page(addr);
2369
2370 page->mapping = NULL;
2371}
2372
2373static void perf_mmap_data_free_work(struct work_struct *work)
2374{
2375 struct perf_mmap_data *data;
2376 void *base;
2377 int i, nr;
2378
2379 data = container_of(work, struct perf_mmap_data, work);
2380 nr = 1 << data->data_order;
2381
2382 base = data->user_page;
2383 for (i = 0; i < nr + 1; i++)
2384 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2385
2386 vfree(base);
Kristian Høgsbergec70ccd2009-12-01 15:05:01 -05002387 kfree(data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002388}
2389
2390static void perf_mmap_data_free(struct perf_mmap_data *data)
2391{
2392 schedule_work(&data->work);
2393}
2394
2395static struct perf_mmap_data *
2396perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2397{
2398 struct perf_mmap_data *data;
2399 unsigned long size;
2400 void *all_buf;
2401
2402 WARN_ON(atomic_read(&event->mmap_count));
2403
2404 size = sizeof(struct perf_mmap_data);
2405 size += sizeof(void *);
2406
2407 data = kzalloc(size, GFP_KERNEL);
2408 if (!data)
2409 goto fail;
2410
2411 INIT_WORK(&data->work, perf_mmap_data_free_work);
2412
2413 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2414 if (!all_buf)
2415 goto fail_all_buf;
2416
2417 data->user_page = all_buf;
2418 data->data_pages[0] = all_buf + PAGE_SIZE;
2419 data->data_order = ilog2(nr_pages);
2420 data->nr_pages = 1;
2421
2422 return data;
2423
2424fail_all_buf:
2425 kfree(data);
2426
2427fail:
2428 return NULL;
2429}
2430
2431#endif
2432
2433static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2434{
2435 struct perf_event *event = vma->vm_file->private_data;
2436 struct perf_mmap_data *data;
2437 int ret = VM_FAULT_SIGBUS;
2438
2439 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2440 if (vmf->pgoff == 0)
2441 ret = 0;
2442 return ret;
2443 }
2444
2445 rcu_read_lock();
2446 data = rcu_dereference(event->data);
2447 if (!data)
2448 goto unlock;
2449
2450 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2451 goto unlock;
2452
2453 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2454 if (!vmf->page)
2455 goto unlock;
2456
2457 get_page(vmf->page);
2458 vmf->page->mapping = vma->vm_file->f_mapping;
2459 vmf->page->index = vmf->pgoff;
2460
2461 ret = 0;
2462unlock:
2463 rcu_read_unlock();
2464
2465 return ret;
2466}
2467
2468static void
2469perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2470{
2471 long max_size = perf_data_size(data);
2472
2473 atomic_set(&data->lock, -1);
2474
2475 if (event->attr.watermark) {
2476 data->watermark = min_t(long, max_size,
2477 event->attr.wakeup_watermark);
2478 }
2479
2480 if (!data->watermark)
Stephane Eranian8904b182009-11-20 22:19:57 +01002481 data->watermark = max_size / 2;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002482
2483
2484 rcu_assign_pointer(event->data, data);
2485}
2486
2487static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2488{
2489 struct perf_mmap_data *data;
2490
2491 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2492 perf_mmap_data_free(data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002493}
2494
Peter Zijlstra906010b2009-09-21 16:08:49 +02002495static void perf_mmap_data_release(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002496{
2497 struct perf_mmap_data *data = event->data;
2498
2499 WARN_ON(atomic_read(&event->mmap_count));
2500
2501 rcu_assign_pointer(event->data, NULL);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002502 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002503}
2504
2505static void perf_mmap_open(struct vm_area_struct *vma)
2506{
2507 struct perf_event *event = vma->vm_file->private_data;
2508
2509 atomic_inc(&event->mmap_count);
2510}
2511
2512static void perf_mmap_close(struct vm_area_struct *vma)
2513{
2514 struct perf_event *event = vma->vm_file->private_data;
2515
2516 WARN_ON_ONCE(event->ctx->parent_ctx);
2517 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Peter Zijlstra906010b2009-09-21 16:08:49 +02002518 unsigned long size = perf_data_size(event->data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002519 struct user_struct *user = current_user();
2520
Peter Zijlstra906010b2009-09-21 16:08:49 +02002521 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002522 vma->vm_mm->locked_vm -= event->data->nr_locked;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002523 perf_mmap_data_release(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002524 mutex_unlock(&event->mmap_mutex);
2525 }
2526}
2527
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04002528static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002529 .open = perf_mmap_open,
2530 .close = perf_mmap_close,
2531 .fault = perf_mmap_fault,
2532 .page_mkwrite = perf_mmap_fault,
2533};
2534
2535static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2536{
2537 struct perf_event *event = file->private_data;
2538 unsigned long user_locked, user_lock_limit;
2539 struct user_struct *user = current_user();
2540 unsigned long locked, lock_limit;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002541 struct perf_mmap_data *data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002542 unsigned long vma_size;
2543 unsigned long nr_pages;
2544 long user_extra, extra;
2545 int ret = 0;
2546
2547 if (!(vma->vm_flags & VM_SHARED))
2548 return -EINVAL;
2549
2550 vma_size = vma->vm_end - vma->vm_start;
2551 nr_pages = (vma_size / PAGE_SIZE) - 1;
2552
2553 /*
2554 * If we have data pages ensure they're a power-of-two number, so we
2555 * can do bitmasks instead of modulo.
2556 */
2557 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2558 return -EINVAL;
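 /*
 * (Illustrative, assuming 4KiB pages: nr_pages = 8 gives a 32KiB
 * data area, so "offset & (32768 - 1)" replaces "offset % 32768".)
 */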
2559
2560 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2561 return -EINVAL;
2562
2563 if (vma->vm_pgoff != 0)
2564 return -EINVAL;
2565
2566 WARN_ON_ONCE(event->ctx->parent_ctx);
2567 mutex_lock(&event->mmap_mutex);
2568 if (event->output) {
2569 ret = -EINVAL;
2570 goto unlock;
2571 }
2572
2573 if (atomic_inc_not_zero(&event->mmap_count)) {
2574 if (nr_pages != event->data->nr_pages)
2575 ret = -EINVAL;
2576 goto unlock;
2577 }
2578
2579 user_extra = nr_pages + 1;
2580 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2581
2582 /*
2583 * Increase the limit linearly with more CPUs:
2584 */
2585 user_lock_limit *= num_online_cpus();
2586
2587 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2588
2589 extra = 0;
2590 if (user_locked > user_lock_limit)
2591 extra = user_locked - user_lock_limit;
2592
Jiri Slaby78d7d402010-03-05 13:42:54 -08002593 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002594 lock_limit >>= PAGE_SHIFT;
2595 locked = vma->vm_mm->locked_vm + extra;
2596
2597 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2598 !capable(CAP_IPC_LOCK)) {
2599 ret = -EPERM;
2600 goto unlock;
2601 }
2602
2603 WARN_ON(event->data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002604
2605 data = perf_mmap_data_alloc(event, nr_pages);
2606 ret = -ENOMEM;
2607 if (!data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002608 goto unlock;
2609
Peter Zijlstra906010b2009-09-21 16:08:49 +02002610 ret = 0;
2611 perf_mmap_data_init(event, data);
2612
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002613 atomic_set(&event->mmap_count, 1);
2614 atomic_long_add(user_extra, &user->locked_vm);
2615 vma->vm_mm->locked_vm += extra;
2616 event->data->nr_locked = extra;
2617 if (vma->vm_flags & VM_WRITE)
2618 event->data->writable = 1;
2619
2620unlock:
2621 mutex_unlock(&event->mmap_mutex);
2622
2623 vma->vm_flags |= VM_RESERVED;
2624 vma->vm_ops = &perf_mmap_vmops;
2625
2626 return ret;
2627}
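/*
 * Illustrative user-space usage (a sketch): mapping one control page
 * plus 2^n data pages, matching the size checks above:
 *
 *	len = (1 + 8) * page_size;
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */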
2628
2629static int perf_fasync(int fd, struct file *filp, int on)
2630{
2631 struct inode *inode = filp->f_path.dentry->d_inode;
2632 struct perf_event *event = filp->private_data;
2633 int retval;
2634
2635 mutex_lock(&inode->i_mutex);
2636 retval = fasync_helper(fd, filp, on, &event->fasync);
2637 mutex_unlock(&inode->i_mutex);
2638
2639 if (retval < 0)
2640 return retval;
2641
2642 return 0;
2643}
2644
2645static const struct file_operations perf_fops = {
2646 .release = perf_release,
2647 .read = perf_read,
2648 .poll = perf_poll,
2649 .unlocked_ioctl = perf_ioctl,
2650 .compat_ioctl = perf_ioctl,
2651 .mmap = perf_mmap,
2652 .fasync = perf_fasync,
2653};
2654
2655/*
2656 * Perf event wakeup
2657 *
2658 * If there's data, ensure we set the poll() state and publish everything
2659 * to user-space before waking everybody up.
2660 */
2661
2662void perf_event_wakeup(struct perf_event *event)
2663{
2664 wake_up_all(&event->waitq);
2665
2666 if (event->pending_kill) {
2667 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2668 event->pending_kill = 0;
2669 }
2670}
2671
2672/*
2673 * Pending wakeups
2674 *
2675 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2676 *
2677 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2678 * singly-linked list and use cmpxchg() to add entries locklessly.
2679 */
2680
2681static void perf_pending_event(struct perf_pending_entry *entry)
2682{
2683 struct perf_event *event = container_of(entry,
2684 struct perf_event, pending);
2685
2686 if (event->pending_disable) {
2687 event->pending_disable = 0;
2688 __perf_event_disable(event);
2689 }
2690
2691 if (event->pending_wakeup) {
2692 event->pending_wakeup = 0;
2693 perf_event_wakeup(event);
2694 }
2695}
2696
2697#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2698
2699static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2700 PENDING_TAIL,
2701};
2702
2703static void perf_pending_queue(struct perf_pending_entry *entry,
2704 void (*func)(struct perf_pending_entry *))
2705{
2706 struct perf_pending_entry **head;
2707
2708 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2709 return;
2710
2711 entry->func = func;
2712
2713 head = &get_cpu_var(perf_pending_head);
2714
2715 do {
2716 entry->next = *head;
2717 } while (cmpxchg(head, entry->next, entry) != entry->next);
2718
2719 set_perf_event_pending();
2720
2721 put_cpu_var(perf_pending_head);
2722}
2723
2724static int __perf_pending_run(void)
2725{
2726 struct perf_pending_entry *list;
2727 int nr = 0;
2728
2729 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2730 while (list != PENDING_TAIL) {
2731 void (*func)(struct perf_pending_entry *);
2732 struct perf_pending_entry *entry = list;
2733
2734 list = list->next;
2735
2736 func = entry->func;
2737 entry->next = NULL;
2738 /*
2739 * Ensure we observe the unqueue before we issue the wakeup,
2740 * so that we won't be waiting forever.
2741 * -- see perf_not_pending().
2742 */
2743 smp_wmb();
2744
2745 func(entry);
2746 nr++;
2747 }
2748
2749 return nr;
2750}
2751
2752static inline int perf_not_pending(struct perf_event *event)
2753{
2754 /*
2755 * If we flush on whatever cpu we run on, there is a chance we don't
2756 * need to wait.
2757 */
2758 get_cpu();
2759 __perf_pending_run();
2760 put_cpu();
2761
2762 /*
2763 * Ensure we see the proper queue state before going to sleep
2764 * so that we do not miss the wakeup. -- see __perf_pending_run()
2765 */
2766 smp_rmb();
2767 return event->pending.next == NULL;
2768}
2769
2770static void perf_pending_sync(struct perf_event *event)
2771{
2772 wait_event(event->waitq, perf_not_pending(event));
2773}
2774
2775void perf_event_do_pending(void)
2776{
2777 __perf_pending_run();
2778}
2779
2780/*
2781 * Callchain support -- arch specific
2782 */
2783
2784__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2785{
2786 return NULL;
2787}
2788
Frederic Weisbeckerdcd5c162010-03-16 01:05:02 +01002789#ifdef CONFIG_EVENT_TRACING
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01002790__weak
2791void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2792{
2793}
Frederic Weisbeckerdcd5c162010-03-16 01:05:02 +01002794#endif
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01002795
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002796/*
2797 * Output
2798 */
2799static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2800 unsigned long offset, unsigned long head)
2801{
2802 unsigned long mask;
2803
2804 if (!data->writable)
2805 return true;
2806
Peter Zijlstra906010b2009-09-21 16:08:49 +02002807 mask = perf_data_size(data) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002808
2809 offset = (offset - tail) & mask;
2810 head = (head - tail) & mask;
2811
2812 if ((int)(head - offset) < 0)
2813 return false;
2814
2815 return true;
2816}
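/*
 * Illustrative example (not from this file): with a 64KiB data area
 * (mask = 0xffff), tail = 0xf000, offset = 0xfe00 and head = 0x10e00:
 * relative to tail, offset becomes 0x0e00 and head becomes 0x1e00, so
 * head - offset = 0x1000 >= 0 and the write fits. Had the new head
 * wrapped past the tail, the difference would go negative and the
 * caller would drop the record (see the ->lost accounting below).
 */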
2817
2818static void perf_output_wakeup(struct perf_output_handle *handle)
2819{
2820 atomic_set(&handle->data->poll, POLL_IN);
2821
2822 if (handle->nmi) {
2823 handle->event->pending_wakeup = 1;
2824 perf_pending_queue(&handle->event->pending,
2825 perf_pending_event);
2826 } else
2827 perf_event_wakeup(handle->event);
2828}
2829
2830/*
2831 * Curious locking construct.
2832 *
2833 * We need to ensure a later event_id doesn't publish a head when a former
2834 * event_id isn't done writing. However since we need to deal with NMIs we
2835 * cannot fully serialize things.
2836 *
2837 * What we do is serialize between CPUs so we only have to deal with NMI
2838 * nesting on a single CPU.
2839 *
2840 * We only publish the head (and generate a wakeup) when the outer-most
2841 * event_id completes.
2842 */
2843static void perf_output_lock(struct perf_output_handle *handle)
2844{
2845 struct perf_mmap_data *data = handle->data;
Peter Zijlstra559fdc32009-11-16 12:45:14 +01002846 int cur, cpu = get_cpu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002847
2848 handle->locked = 0;
2849
Peter Zijlstra559fdc32009-11-16 12:45:14 +01002850 for (;;) {
2851 cur = atomic_cmpxchg(&data->lock, -1, cpu);
2852 if (cur == -1) {
2853 handle->locked = 1;
2854 break;
2855 }
2856 if (cur == cpu)
2857 break;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002858
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002859 cpu_relax();
Peter Zijlstra559fdc32009-11-16 12:45:14 +01002860 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002861}
2862
2863static void perf_output_unlock(struct perf_output_handle *handle)
2864{
2865 struct perf_mmap_data *data = handle->data;
2866 unsigned long head;
2867 int cpu;
2868
2869 data->done_head = data->head;
2870
2871 if (!handle->locked)
2872 goto out;
2873
2874again:
2875 /*
2876 * The xchg implies a full barrier that ensures all writes are done
2877 * before we publish the new head, matched by a rmb() in userspace when
2878 * reading this position.
2879 */
2880 while ((head = atomic_long_xchg(&data->done_head, 0)))
2881 data->user_page->data_head = head;
2882
2883 /*
2884 * NMI can happen here, which means we can miss a done_head update.
2885 */
2886
2887 cpu = atomic_xchg(&data->lock, -1);
2888 WARN_ON_ONCE(cpu != smp_processor_id());
2889
2890 /*
2891 * Therefore we have to validate we did not indeed do so.
2892 */
2893 if (unlikely(atomic_long_read(&data->done_head))) {
2894 /*
2895 * Since we had it locked, we can lock it again.
2896 */
2897 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2898 cpu_relax();
2899
2900 goto again;
2901 }
2902
2903 if (atomic_xchg(&data->wakeup, 0))
2904 perf_output_wakeup(handle);
2905out:
Peter Zijlstra559fdc32009-11-16 12:45:14 +01002906 put_cpu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002907}
2908
2909void perf_output_copy(struct perf_output_handle *handle,
2910 const void *buf, unsigned int len)
2911{
2912 unsigned int pages_mask;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002913 unsigned long offset;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002914 unsigned int size;
2915 void **pages;
2916
2917 offset = handle->offset;
2918 pages_mask = handle->data->nr_pages - 1;
2919 pages = handle->data->data_pages;
2920
2921 do {
Peter Zijlstra906010b2009-09-21 16:08:49 +02002922 unsigned long page_offset;
2923 unsigned long page_size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002924 int nr;
2925
2926 nr = (offset >> PAGE_SHIFT) & pages_mask;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002927 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2928 page_offset = offset & (page_size - 1);
2929 size = min_t(unsigned int, page_size - page_offset, len);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002930
2931 memcpy(pages[nr] + page_offset, buf, size);
2932
2933 len -= size;
2934 buf += size;
2935 offset += size;
2936 } while (len);
2937
2938 handle->offset = offset;
2939
2940 /*
2941 * Check we didn't copy past our reservation window, taking the
2942 * possible unsigned int wrap into account.
2943 */
2944 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2945}
2946
2947int perf_output_begin(struct perf_output_handle *handle,
2948 struct perf_event *event, unsigned int size,
2949 int nmi, int sample)
2950{
2951 struct perf_event *output_event;
2952 struct perf_mmap_data *data;
2953 unsigned long tail, offset, head;
2954 int have_lost;
2955 struct {
2956 struct perf_event_header header;
2957 u64 id;
2958 u64 lost;
2959 } lost_event;
2960
2961 rcu_read_lock();
2962 /*
2963 * For inherited events we send all the output towards the parent.
2964 */
2965 if (event->parent)
2966 event = event->parent;
2967
2968 output_event = rcu_dereference(event->output);
2969 if (output_event)
2970 event = output_event;
2971
2972 data = rcu_dereference(event->data);
2973 if (!data)
2974 goto out;
2975
2976 handle->data = data;
2977 handle->event = event;
2978 handle->nmi = nmi;
2979 handle->sample = sample;
2980
2981 if (!data->nr_pages)
2982 goto fail;
2983
2984 have_lost = atomic_read(&data->lost);
2985 if (have_lost)
2986 size += sizeof(lost_event);
2987
2988 perf_output_lock(handle);
2989
2990 do {
2991 /*
2992 * Userspace could choose to issue an mb() before updating the
2993 * tail pointer, so that all reads are completed before the
2994 * write is issued.
2995 */
2996 tail = ACCESS_ONCE(data->user_page->data_tail);
2997 smp_rmb();
2998 offset = head = atomic_long_read(&data->head);
2999 head += size;
3000 if (unlikely(!perf_output_space(data, tail, offset, head)))
3001 goto fail;
3002 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
3003
3004 handle->offset = offset;
3005 handle->head = head;
3006
3007 if (head - tail > data->watermark)
3008 atomic_set(&data->wakeup, 1);
3009
3010 if (have_lost) {
3011 lost_event.header.type = PERF_RECORD_LOST;
3012 lost_event.header.misc = 0;
3013 lost_event.header.size = sizeof(lost_event);
3014 lost_event.id = event->id;
3015 lost_event.lost = atomic_xchg(&data->lost, 0);
3016
3017 perf_output_put(handle, lost_event);
3018 }
3019
3020 return 0;
3021
3022fail:
3023 atomic_inc(&data->lost);
3024 perf_output_unlock(handle);
3025out:
3026 rcu_read_unlock();
3027
3028 return -ENOSPC;
3029}
3030
3031void perf_output_end(struct perf_output_handle *handle)
3032{
3033 struct perf_event *event = handle->event;
3034 struct perf_mmap_data *data = handle->data;
3035
3036 int wakeup_events = event->attr.wakeup_events;
3037
3038 if (handle->sample && wakeup_events) {
3039 int events = atomic_inc_return(&data->events);
3040 if (events >= wakeup_events) {
3041 atomic_sub(wakeup_events, &data->events);
3042 atomic_set(&data->wakeup, 1);
3043 }
3044 }
3045
3046 perf_output_unlock(handle);
3047 rcu_read_unlock();
3048}
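/*
 * Illustrative consumer side (a sketch, not from this file;
 * process_record() is a hypothetical helper returning the record
 * size): the data_tail read with ACCESS_ONCE()/smp_rmb() in
 * perf_output_begin() pairs with a user-space reader that publishes
 * its progress like:
 *
 *	head = pc->data_head;
 *	rmb();
 *	while (tail != head)
 *		tail += process_record(base + (tail & mask));
 *	mb();
 *	pc->data_tail = tail;
 */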
3049
3050static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3051{
3052 /*
3053 * only top-level events carry the pid namespace they were created in
3054 */
3055 if (event->parent)
3056 event = event->parent;
3057
3058 return task_tgid_nr_ns(p, event->ns);
3059}
3060
3061static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3062{
3063 /*
3064 * only top-level events carry the pid namespace they were created in
3065 */
3066 if (event->parent)
3067 event = event->parent;
3068
3069 return task_pid_nr_ns(p, event->ns);
3070}
3071
3072static void perf_output_read_one(struct perf_output_handle *handle,
3073 struct perf_event *event)
3074{
3075 u64 read_format = event->attr.read_format;
3076 u64 values[4];
3077 int n = 0;
3078
3079 values[n++] = atomic64_read(&event->count);
3080 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3081 values[n++] = event->total_time_enabled +
3082 atomic64_read(&event->child_total_time_enabled);
3083 }
3084 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3085 values[n++] = event->total_time_running +
3086 atomic64_read(&event->child_total_time_running);
3087 }
3088 if (read_format & PERF_FORMAT_ID)
3089 values[n++] = primary_event_id(event);
3090
3091 perf_output_copy(handle, values, n * sizeof(u64));
3092}
3093
3094/*
3095 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3096 */
3097static void perf_output_read_group(struct perf_output_handle *handle,
3098 struct perf_event *event)
3099{
3100 struct perf_event *leader = event->group_leader, *sub;
3101 u64 read_format = event->attr.read_format;
3102 u64 values[5];
3103 int n = 0;
3104
3105 values[n++] = 1 + leader->nr_siblings;
3106
3107 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3108 values[n++] = leader->total_time_enabled;
3109
3110 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3111 values[n++] = leader->total_time_running;
3112
3113 if (leader != event)
3114 leader->pmu->read(leader);
3115
3116 values[n++] = atomic64_read(&leader->count);
3117 if (read_format & PERF_FORMAT_ID)
3118 values[n++] = primary_event_id(leader);
3119
3120 perf_output_copy(handle, values, n * sizeof(u64));
3121
3122 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3123 n = 0;
3124
3125 if (sub != event)
3126 sub->pmu->read(sub);
3127
3128 values[n++] = atomic64_read(&sub->count);
3129 if (read_format & PERF_FORMAT_ID)
3130 values[n++] = primary_event_id(sub);
3131
3132 perf_output_copy(handle, values, n * sizeof(u64));
3133 }
3134}
3135
3136static void perf_output_read(struct perf_output_handle *handle,
3137 struct perf_event *event)
3138{
3139 if (event->attr.read_format & PERF_FORMAT_GROUP)
3140 perf_output_read_group(handle, event);
3141 else
3142 perf_output_read_one(handle, event);
3143}
3144
3145void perf_output_sample(struct perf_output_handle *handle,
3146 struct perf_event_header *header,
3147 struct perf_sample_data *data,
3148 struct perf_event *event)
3149{
3150 u64 sample_type = data->type;
3151
3152 perf_output_put(handle, *header);
3153
3154 if (sample_type & PERF_SAMPLE_IP)
3155 perf_output_put(handle, data->ip);
3156
3157 if (sample_type & PERF_SAMPLE_TID)
3158 perf_output_put(handle, data->tid_entry);
3159
3160 if (sample_type & PERF_SAMPLE_TIME)
3161 perf_output_put(handle, data->time);
3162
3163 if (sample_type & PERF_SAMPLE_ADDR)
3164 perf_output_put(handle, data->addr);
3165
3166 if (sample_type & PERF_SAMPLE_ID)
3167 perf_output_put(handle, data->id);
3168
3169 if (sample_type & PERF_SAMPLE_STREAM_ID)
3170 perf_output_put(handle, data->stream_id);
3171
3172 if (sample_type & PERF_SAMPLE_CPU)
3173 perf_output_put(handle, data->cpu_entry);
3174
3175 if (sample_type & PERF_SAMPLE_PERIOD)
3176 perf_output_put(handle, data->period);
3177
3178 if (sample_type & PERF_SAMPLE_READ)
3179 perf_output_read(handle, event);
3180
3181 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3182 if (data->callchain) {
3183 int size = 1;
3184
3185 if (data->callchain)
3186 size += data->callchain->nr;
3187
3188 size *= sizeof(u64);
3189
3190 perf_output_copy(handle, data->callchain, size);
3191 } else {
3192 u64 nr = 0;
3193 perf_output_put(handle, nr);
3194 }
3195 }
3196
3197 if (sample_type & PERF_SAMPLE_RAW) {
3198 if (data->raw) {
3199 perf_output_put(handle, data->raw->size);
3200 perf_output_copy(handle, data->raw->data,
3201 data->raw->size);
3202 } else {
3203 struct {
3204 u32 size;
3205 u32 data;
3206 } raw = {
3207 .size = sizeof(u32),
3208 .data = 0,
3209 };
3210 perf_output_put(handle, raw);
3211 }
3212 }
3213}
3214
3215void perf_prepare_sample(struct perf_event_header *header,
3216 struct perf_sample_data *data,
3217 struct perf_event *event,
3218 struct pt_regs *regs)
3219{
3220 u64 sample_type = event->attr.sample_type;
3221
3222 data->type = sample_type;
3223
3224 header->type = PERF_RECORD_SAMPLE;
3225 header->size = sizeof(*header);
3226
3227 header->misc = 0;
3228 header->misc |= perf_misc_flags(regs);
3229
3230 if (sample_type & PERF_SAMPLE_IP) {
3231 data->ip = perf_instruction_pointer(regs);
3232
3233 header->size += sizeof(data->ip);
3234 }
3235
3236 if (sample_type & PERF_SAMPLE_TID) {
3237 /* namespace issues */
3238 data->tid_entry.pid = perf_event_pid(event, current);
3239 data->tid_entry.tid = perf_event_tid(event, current);
3240
3241 header->size += sizeof(data->tid_entry);
3242 }
3243
3244 if (sample_type & PERF_SAMPLE_TIME) {
3245 data->time = perf_clock();
3246
3247 header->size += sizeof(data->time);
3248 }
3249
3250 if (sample_type & PERF_SAMPLE_ADDR)
3251 header->size += sizeof(data->addr);
3252
3253 if (sample_type & PERF_SAMPLE_ID) {
3254 data->id = primary_event_id(event);
3255
3256 header->size += sizeof(data->id);
3257 }
3258
3259 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3260 data->stream_id = event->id;
3261
3262 header->size += sizeof(data->stream_id);
3263 }
3264
3265 if (sample_type & PERF_SAMPLE_CPU) {
3266 data->cpu_entry.cpu = raw_smp_processor_id();
3267 data->cpu_entry.reserved = 0;
3268
3269 header->size += sizeof(data->cpu_entry);
3270 }
3271
3272 if (sample_type & PERF_SAMPLE_PERIOD)
3273 header->size += sizeof(data->period);
3274
3275 if (sample_type & PERF_SAMPLE_READ)
3276 header->size += perf_event_read_size(event);
3277
3278 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3279 int size = 1;
3280
3281 data->callchain = perf_callchain(regs);
3282
3283 if (data->callchain)
3284 size += data->callchain->nr;
3285
3286 header->size += size * sizeof(u64);
3287 }
3288
3289 if (sample_type & PERF_SAMPLE_RAW) {
3290 int size = sizeof(u32);
3291
3292 if (data->raw)
3293 size += data->raw->size;
3294 else
3295 size += sizeof(u32);
3296
3297 WARN_ON_ONCE(size & (sizeof(u64)-1));
3298 header->size += size;
3299 }
3300}
3301
3302static void perf_event_output(struct perf_event *event, int nmi,
3303 struct perf_sample_data *data,
3304 struct pt_regs *regs)
3305{
3306 struct perf_output_handle handle;
3307 struct perf_event_header header;
3308
3309 perf_prepare_sample(&header, data, event, regs);
3310
3311 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3312 return;
3313
3314 perf_output_sample(&handle, &header, data, event);
3315
3316 perf_output_end(&handle);
3317}
3318
3319/*
3320 * read event_id
3321 */
3322
3323struct perf_read_event {
3324 struct perf_event_header header;
3325
3326 u32 pid;
3327 u32 tid;
3328};
3329
3330static void
3331perf_event_read_event(struct perf_event *event,
3332 struct task_struct *task)
3333{
3334 struct perf_output_handle handle;
3335 struct perf_read_event read_event = {
3336 .header = {
3337 .type = PERF_RECORD_READ,
3338 .misc = 0,
3339 .size = sizeof(read_event) + perf_event_read_size(event),
3340 },
3341 .pid = perf_event_pid(event, task),
3342 .tid = perf_event_tid(event, task),
3343 };
3344 int ret;
3345
3346 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3347 if (ret)
3348 return;
3349
3350 perf_output_put(&handle, read_event);
3351 perf_output_read(&handle, event);
3352
3353 perf_output_end(&handle);
3354}
3355
3356/*
3357 * task tracking -- fork/exit
3358 *
3359 * enabled by: attr.comm | attr.mmap | attr.task
3360 */
3361
3362struct perf_task_event {
3363 struct task_struct *task;
3364 struct perf_event_context *task_ctx;
3365
3366 struct {
3367 struct perf_event_header header;
3368
3369 u32 pid;
3370 u32 ppid;
3371 u32 tid;
3372 u32 ptid;
3373 u64 time;
3374 } event_id;
3375};
3376
3377static void perf_event_task_output(struct perf_event *event,
3378 struct perf_task_event *task_event)
3379{
3380 struct perf_output_handle handle;
3381 int size;
3382 struct task_struct *task = task_event->task;
3383 int ret;
3384
	size = task_event->event_id.header.size;
	ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_output_end(&handle);
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm || event->attr.mmap || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx = task_event->task_ctx;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_task_ctx(&cpuctx->ctx, task_event);
	if (!ctx)
		ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_task_ctx(ctx, task_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			    struct perf_event_context *task_ctx,
			    int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				   struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
			 comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	unsigned int size;
	char comm[TASK_COMM_LEN];

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_comm_ctx(ctx, comm_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (task->perf_event_ctxp)
		perf_event_enable_on_exec(task);

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
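/*
 * Illustrative note, not code from this file: the exec path is the
 * expected caller of perf_event_comm() in this era (which is also why it
 * piggy-backs perf_event_enable_on_exec() above). A sketch of the
 * assumed call site, after the task name has been updated:
 *
 *	set_task_comm(current, new_name);
 *	perf_event_comm(current);	// emit PERF_RECORD_COMM
 */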

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
			 mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_event_mmap_match(struct perf_event *event,
				 struct perf_mmap_event *mmap_event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.mmap)
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				struct perf_mmap_event *mmap_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_mmap_ctx(ctx, mmap_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();

	kfree(buf);
}

void __perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
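/*
 * Illustrative sketch (an assumption: the wrapper lives in
 * <linux/perf_event.h>, not in this file): __perf_event_mmap() is reached
 * through a perf_event_mmap() helper that only reports executable
 * mappings, roughly:
 *
 *	static inline void perf_event_mmap(struct vm_area_struct *vma)
 *	{
 *		if (vma->vm_flags & VM_EXEC)
 *			__perf_event_mmap(vma);
 *	}
 */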

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				 int throttle, struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	throttle = (throttle && event->pmu->unthrottle != NULL);

	if (!throttle) {
		hwc->interrupts++;
	} else {
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_event_sample_rate) {
				hwc->interrupts = MAX_INTERRUPTS;
				perf_log_throttle(event, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling events even though on the previous
			 * pass we disabled it - just in case we raced with a
			 * sched-in and the event got enabled again:
			 */
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		if (nmi) {
			event->pending_disable = 1;
			perf_pending_queue(&event->pending,
					   perf_pending_event);
		} else
			perf_event_disable(event);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

	return ret;
}

int perf_event_overflow(struct perf_event *event, int nmi,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}
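/*
 * Worked example of the throttle test above, with illustrative numbers:
 * at HZ == 1000 and the default sysctl_perf_event_sample_rate of 100000,
 * an event may take up to 100000 / 1000 == 100 overflow interrupts in the
 * current tick; on the 101st, HZ * hwc->interrupts exceeds the sample
 * rate, the event is marked MAX_INTERRUPTS, a PERF_RECORD_THROTTLE is
 * logged and __perf_event_overflow() returns 1 so callers stop raising
 * overflows until the tick path unthrottles the event.
 */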

/*
 * Generic software event infrastructure
 */

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = atomic64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
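/*
 * Worked example with illustrative numbers: for sample_period == 4 and
 * period_left == 1, nr = (4 + 1) / 4 == 1 overflow is due and period_left
 * becomes 1 - 4 == -3. Later, perf_swevent_add() adds event counts to
 * period_left until the value turns non-negative again, at which point
 * the next call here reports the following overflow - the sign really is
 * the trigger, as the comment above says.
 */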

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				  int nmi, struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					  data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_unthrottle(struct perf_event *event)
{
	/*
	 * Nothing to do, we already reset hwc->interrupts.
	 */
}

static void perf_swevent_add(struct perf_event *event, u64 nr,
			     int nmi, struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	atomic64_add(nr, &event->count);

	if (!regs)
		return;

	if (!hwc->sample_period)
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (atomic64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}

static int perf_swevent_is_counting(struct perf_event *event)
{
	/*
	 * The event is active, we're good!
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		return 1;

	/*
	 * The event is off/error, not counting.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE)
		return 0;

	/*
	 * The event is inactive, if the context is active
	 * we're part of a group that didn't make it on the 'pmu',
	 * not counting.
	 */
	if (event->ctx->is_active)
		return 0;

	/*
	 * We're inactive and the context is too, this means the
	 * task is scheduled out, we're counting events that happen
	 * to us, like migration events.
	 */
	return 1;
}

static int perf_tp_event_match(struct perf_event *event,
			       struct perf_sample_data *data);

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
			      enum perf_type_id type,
			      u32 event_id,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (!perf_swevent_is_counting(event))
		return 0;

	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	if (event->attr.type == PERF_TYPE_TRACEPOINT &&
	    !perf_tp_event_match(event, data))
		return 0;

	return 1;
}

static void perf_swevent_ctx_event(struct perf_event_context *ctx,
				   enum perf_type_id type,
				   u32 event_id, u64 nr, int nmi,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_add(event, nr, nmi, data, regs);
	}
}

int perf_swevent_get_recursion_context(void)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (cpuctx->recursion[rctx]) {
		put_cpu_var(perf_cpu_context);
		return -1;
	}

	cpuctx->recursion[rctx]++;
	barrier();

	return rctx;
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

void perf_swevent_put_recursion_context(int rctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	barrier();
	cpuctx->recursion[rctx]--;
	put_cpu_var(perf_cpu_context);
}
EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
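/*
 * Illustrative pairing of the two helpers above (a sketch, not a caller
 * from this file): code that raises software events from arbitrary
 * contexts brackets the work so an event firing from NMI/IRQ/softirq
 * cannot recurse into itself at the same context level:
 *
 *	int rctx = perf_swevent_get_recursion_context();
 *	if (rctx < 0)
 *		return;		// already raising an event at this level
 *	... raise the event ...
 *	perf_swevent_put_recursion_context(rctx);
 */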

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
			     u64 nr, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	cpuctx = &__get_cpu_var(perf_cpu_context);
	rcu_read_lock();
	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
			       nr, nmi, data, regs);
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
	rcu_read_unlock();
}

void __perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);
}
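/*
 * Illustrative call site (an assumption about callers elsewhere in the
 * tree): architecture fault handlers raise software events through a
 * perf_sw_event() wrapper that checks perf_swevent_enabled[] before
 * entering __perf_sw_event(), e.g. roughly:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */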

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->sample_period) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}
	return 0;
}

static void perf_swevent_disable(struct perf_event *event)
{
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swevent_enable,
	.disable	= perf_swevent_disable,
	.read		= perf_swevent_read,
	.unthrottle	= perf_swevent_unthrottle,
};

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((event->attr.exclude_kernel || !regs) &&
			!event->attr.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, 0, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;
	if (hwc->sample_period) {
		u64 period;

		if (hwc->remaining) {
			if (hwc->remaining < 0)
				period = 10000;
			else
				period = hwc->remaining;
			hwc->remaining = 0;
		} else {
			period = max_t(u64, 10000, hwc->sample_period);
		}
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->sample_period) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		hwc->remaining = ktime_to_ns(remaining);

		hrtimer_cancel(&hwc->hrtimer);
	}
}

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_perf_event_update(struct perf_event *event)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_xchg(&event->hw.prev_count, now);
	atomic64_add(now - prev, &event->count);
}

static int cpu_clock_perf_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	perf_swevent_start_hrtimer(event);

	return 0;
}

static void cpu_clock_perf_event_disable(struct perf_event *event)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_perf_event_update(event);
}

static void cpu_clock_perf_event_read(struct perf_event *event)
{
	cpu_clock_perf_event_update(event);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_event_enable,
	.disable	= cpu_clock_perf_event_disable,
	.read		= cpu_clock_perf_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_perf_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &event->count);
}

static int task_clock_perf_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 now;

	now = event->ctx->time;

	atomic64_set(&hwc->prev_count, now);

	perf_swevent_start_hrtimer(event);

	return 0;
}

static void task_clock_perf_event_disable(struct perf_event *event)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_perf_event_update(event, event->ctx->time);
}

static void task_clock_perf_event_read(struct perf_event *event)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(event->ctx);
		time = event->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - event->ctx->timestamp;
		time = event->ctx->time + delta;
	}

	task_clock_perf_event_update(event, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_event_enable,
	.disable	= task_clock_perf_event_disable,
	.read		= task_clock_perf_event_read,
};

#ifdef CONFIG_EVENT_TRACING

void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
		   int entry_size, struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	/* Trace events already protected against recursion */
	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
			 &data, regs);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static int perf_tp_event_match(struct perf_event *event,
			       struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_disable(event->attr.config);
}

static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	/*
	 * Raw tracepoint data is a severe data leak, only allow root to
	 * have these.
	 */
	if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
			perf_paranoid_tracepoint_raw() &&
			!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (perf_trace_enable(event->attr.config))
		return NULL;

	event->destroy = tp_perf_event_destroy;

	return &perf_ops_generic;
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static int perf_tp_event_match(struct perf_event *event,
			       struct perf_sample_data *data)
{
	return 1;
}

static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	return NULL;
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */
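/*
 * Illustrative note (the ioctl plumbing lives outside this excerpt):
 * perf_event_set_filter() is reached from userspace via the
 * PERF_EVENT_IOC_SET_FILTER ioctl on a tracepoint event fd, roughly:
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
 */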

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static const struct pmu *bp_perf_event_init(struct perf_event *bp)
{
	int err;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return ERR_PTR(err);

	bp->destroy = bp_perf_event_destroy;

	return &perf_ops_bp;
}

void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!perf_exclude_event(bp, regs))
		perf_swevent_add(bp, 1, 1, &sample, regs);
}
#else
static const struct pmu *bp_perf_event_init(struct perf_event *bp)
{
	return NULL;
}

void perf_bp_event(struct perf_event *bp, void *regs)
{
}
#endif

atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	atomic_dec(&perf_swevent_enabled[event_id]);
}

static const struct pmu *sw_perf_event_init(struct perf_event *event)
{
	const struct pmu *pmu = NULL;
	u64 event_id = event->attr.config;

	/*
	 * Software events (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_SW_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu event,
		 * use the cpu_clock event instead.
		 */
		if (event->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
	case PERF_COUNT_SW_CONTEXT_SWITCHES:
	case PERF_COUNT_SW_CPU_MIGRATIONS:
	case PERF_COUNT_SW_ALIGNMENT_FAULTS:
	case PERF_COUNT_SW_EMULATION_FAULTS:
		if (!event->parent) {
			atomic_inc(&perf_swevent_enabled[event_id]);
			event->destroy = sw_perf_event_destroy;
		}
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr,
		 int cpu,
		 struct perf_event_context *ctx,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	event = kzalloc(sizeof(*event), gfpflags);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->ctx		= ctx;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (!overflow_handler && parent_event)
		overflow_handler = parent_event->overflow_handler;

	event->overflow_handler	= overflow_handler;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	switch (attr->type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		pmu = hw_perf_event_init(event);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_event_init(event);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_event_init(event);
		break;

	case PERF_TYPE_BREAKPOINT:
		pmu = bp_perf_event_init(event);
		break;

	default:
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	event->pmu = pmu;

	if (!event->parent) {
		atomic_inc(&nr_events);
		if (event->attr.mmap)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
	}

	return event;
}

static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
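/*
 * Illustrative note on the size handshake above (userspace-side sketch,
 * not kernel code): old userspace on a new kernel passes a short struct
 * and the memset keeps the tail zeroed; new userspace on an old kernel is
 * accepted only if every byte past the kernel's sizeof(*attr) is zero.
 * The caller's side is simply:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);	// PERF_ATTR_SIZE_VER0 or later
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_CPU_CLOCK;
 */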

static int perf_event_set_output(struct perf_event *event, int output_fd)
{
	struct perf_event *output_event = NULL;
	struct file *output_file = NULL;
	struct perf_event *old_output;
	int fput_needed = 0;
	int ret = -EINVAL;

	if (!output_fd)
		goto set;

	output_file = fget_light(output_fd, &fput_needed);
	if (!output_file)
		return -EBADF;

	if (output_file->f_op != &perf_fops)
		goto out;

	output_event = output_file->private_data;

	/* Don't chain output fds */
	if (output_event->output)
		goto out;

	/* Don't set an output fd when we already have an output channel */
	if (event->data)
		goto out;

	atomic_long_inc(&output_file->f_count);

set:
	mutex_lock(&event->mmap_mutex);
	old_output = event->output;
	rcu_assign_pointer(event->output, output_event);
	mutex_unlock(&event->mmap_mutex);

	if (old_output) {
		/*
		 * we need to make sure no existing perf_output_*()
		 * is still referencing this event.
		 */
		synchronize_rcu();
		fput(old_output->filp);
	}

	ret = 0;
out:
	fput_light(output_file, fput_needed);
	return ret;
}

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader event fd
 * @flags:	perf event open flags (PERF_FLAG_*)
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *event, *group_leader;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	group_leader = NULL;
	if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
		err = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
				 NULL, NULL, GFP_KERNEL);
	err = PTR_ERR(event);
	if (IS_ERR(event))
		goto err_put_context;

	err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
	if (err < 0)
		goto err_free_put_context;

	event_file = fget_light(err, &fput_needed2);
	if (!event_file)
		goto err_free_put_context;

	if (flags & PERF_FLAG_FD_OUTPUT) {
		err = perf_event_set_output(event, group_fd);
		if (err)
			goto err_fput_free_put_context;
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

err_fput_free_put_context:
	fput_light(event_file, fput_needed2);

err_free_put_context:
	if (err < 0)
		kfree(event);

err_put_context:
	if (err < 0)
		put_ctx(ctx);

	fput_light(group_file, fput_needed);

	return err;
}
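/*
 * Illustrative userspace usage (a sketch; there is no libc wrapper, so
 * callers are assumed to go through syscall(2)):
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 *	int fd = syscall(__NR_perf_event_open, &attr,
 *			 0,	// pid: current task
 *			 -1,	// cpu: any cpu
 *			 -1,	// group_fd: no group leader
 *			 0);	// flags
 */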

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @pid: task to profile
 * @overflow_handler: callback invoked when the counter overflows
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 pid_t pid,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event *event;
	struct perf_event_context *ctx;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_exit;
	}

	event = perf_event_alloc(attr, cpu, ctx, NULL,
				 NULL, overflow_handler, GFP_KERNEL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_put_context;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	return event;

 err_put_context:
	put_ctx(ctx);
 err_exit:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
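/*
 * Illustrative in-kernel usage (a sketch; the hw_breakpoint layer is one
 * real consumer, but the helper names below are assumptions, not calls
 * made in this file):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *event;
 *
 *	hw_breakpoint_init(&attr);		// assumed helper
 *	attr.bp_addr = (unsigned long)addr;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, -1,
 *						 my_overflow_handler);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */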
4958
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004959/*
4960 * inherit a event from parent task to child task:
4961 */
4962static struct perf_event *
4963inherit_event(struct perf_event *parent_event,
4964 struct task_struct *parent,
4965 struct perf_event_context *parent_ctx,
4966 struct task_struct *child,
4967 struct perf_event *group_leader,
4968 struct perf_event_context *child_ctx)
4969{
4970 struct perf_event *child_event;
4971
4972 /*
4973 * Instead of creating recursive hierarchies of events,
4974 * we link inherited events back to the original parent,
4975 * which has a filp for sure, which we use as the reference
4976 * count:
4977 */
4978 if (parent_event->parent)
4979 parent_event = parent_event->parent;
4980
4981 child_event = perf_event_alloc(&parent_event->attr,
4982 parent_event->cpu, child_ctx,
4983 group_leader, parent_event,
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02004984 NULL, GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004985 if (IS_ERR(child_event))
4986 return child_event;
4987 get_ctx(child_ctx);
4988
4989 /*
4990 * Make the child state follow the state of the parent event,
4991 * not its attr.disabled bit. We hold the parent's mutex,
4992 * so we won't race with perf_event_{en, dis}able_family.
4993 */
4994 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4995 child_event->state = PERF_EVENT_STATE_INACTIVE;
4996 else
4997 child_event->state = PERF_EVENT_STATE_OFF;
4998
Peter Zijlstra75c9f322010-01-29 09:04:26 +01004999 if (parent_event->attr.freq) {
5000 u64 sample_period = parent_event->hw.sample_period;
5001 struct hw_perf_event *hwc = &child_event->hw;
5002
5003 hwc->sample_period = sample_period;
5004 hwc->last_period = sample_period;
5005
5006 atomic64_set(&hwc->period_left, sample_period);
5007 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005008
Peter Zijlstra453f19e2009-11-20 22:19:43 +01005009 child_event->overflow_handler = parent_event->overflow_handler;
5010
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005011 /*
5012 * Link it up in the child's context:
5013 */
5014 add_event_to_ctx(child_event, child_ctx);
5015
5016 /*
5017 * Get a reference to the parent filp - we will fput it
5018 * when the child event exits. This is safe to do because
5019 * we are in the parent and we know that the filp still
5020 * exists and has a nonzero count:
5021 */
5022 atomic_long_inc(&parent_event->filp->f_count);
5023
5024 /*
5025 * Link this into the parent event's child list
5026 */
5027 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5028 mutex_lock(&parent_event->child_mutex);
5029 list_add_tail(&child_event->child_list, &parent_event->child_list);
5030 mutex_unlock(&parent_event->child_mutex);
5031
5032 return child_event;
5033}
5034
5035static int inherit_group(struct perf_event *parent_event,
5036 struct task_struct *parent,
5037 struct perf_event_context *parent_ctx,
5038 struct task_struct *child,
5039 struct perf_event_context *child_ctx)
5040{
5041 struct perf_event *leader;
5042 struct perf_event *sub;
5043 struct perf_event *child_ctr;
5044
5045 leader = inherit_event(parent_event, parent, parent_ctx,
5046 child, NULL, child_ctx);
5047 if (IS_ERR(leader))
5048 return PTR_ERR(leader);
5049 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5050 child_ctr = inherit_event(sub, parent, parent_ctx,
5051 child, leader, child_ctx);
5052 if (IS_ERR(child_ctr))
5053 return PTR_ERR(child_ctr);
5054 }
5055 return 0;
5056}
5057
5058static void sync_child_event(struct perf_event *child_event,
5059 struct task_struct *child)
5060{
5061 struct perf_event *parent_event = child_event->parent;
5062 u64 child_val;
5063
5064 if (child_event->attr.inherit_stat)
5065 perf_event_read_event(child_event, child);
5066
5067 child_val = atomic64_read(&child_event->count);
5068
5069 /*
5070 * Add back the child's count to the parent's count:
5071 */
5072 atomic64_add(child_val, &parent_event->count);
5073 atomic64_add(child_event->total_time_enabled,
5074 &parent_event->child_total_time_enabled);
5075 atomic64_add(child_event->total_time_running,
5076 &parent_event->child_total_time_running);
5077
5078 /*
5079 * Remove this event from the parent's list
5080 */
5081 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5082 mutex_lock(&parent_event->child_mutex);
5083 list_del_init(&child_event->child_list);
5084 mutex_unlock(&parent_event->child_mutex);
5085
5086 /*
5087 * Release the parent event, if this was the last
5088 * reference to it.
5089 */
5090 fput(parent_event->filp);
5091}
5092
5093static void
5094__perf_event_exit_task(struct perf_event *child_event,
5095 struct perf_event_context *child_ctx,
5096 struct task_struct *child)
5097{
5098 struct perf_event *parent_event;
5099
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005100 perf_event_remove_from_context(child_event);
5101
5102 parent_event = child_event->parent;
5103 /*
5104 * It can happen that parent exits first, and has events
5105 * that are still around due to the child reference. These
5106 * events need to be zapped - but otherwise linger.
5107 */
5108 if (parent_event) {
5109 sync_child_event(child_event, child);
5110 free_event(child_event);
5111 }
5112}
5113
5114/*
5115 * When a child task exits, feed back event values to parent events.
5116 */
5117void perf_event_exit_task(struct task_struct *child)
5118{
5119 struct perf_event *child_event, *tmp;
5120 struct perf_event_context *child_ctx;
5121 unsigned long flags;
5122
5123 if (likely(!child->perf_event_ctxp)) {
5124 perf_event_task(child, NULL, 0);
5125 return;
5126 }
5127
5128 local_irq_save(flags);
5129 /*
5130 * We can't reschedule here because interrupts are disabled,
5131 * and either child is current or it is a task that can't be
5132 * scheduled, so we are now safe from rescheduling changing
5133 * our context.
5134 */
5135 child_ctx = child->perf_event_ctxp;
5136 __perf_event_task_sched_out(child_ctx);
5137
5138 /*
5139 * Take the context lock here so that if find_get_context is
5140 * reading child->perf_event_ctxp, we wait until it has
5141 * incremented the context's refcount before we do put_ctx below.
5142 */
Thomas Gleixnere625cce2009-11-17 18:02:06 +01005143 raw_spin_lock(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005144 child->perf_event_ctxp = NULL;
5145 /*
5146 * If this context is a clone; unclone it so it can't get
5147 * swapped to another process while we're removing all
5148 * the events from it.
5149 */
5150 unclone_ctx(child_ctx);
Peter Zijlstra5e942bb2009-11-23 11:37:26 +01005151 update_context_time(child_ctx);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01005152 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005153
5154 /*
5155 * Report the task dead after unscheduling the events so that we
5156 * won't get any samples after PERF_RECORD_EXIT. We can however still
5157 * get a few PERF_RECORD_READ events.
5158 */
5159 perf_event_task(child, child_ctx, 0);
5160
5161 /*
5162 * We can recurse on the same lock type through:
5163 *
5164 * __perf_event_exit_task()
5165 * sync_child_event()
5166 * fput(parent_event->filp)
5167 * perf_release()
5168 * mutex_lock(&ctx->mutex)
5169 *
5170 * But since its the parent context it won't be the same instance.
5171 */
5172 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
5173
5174again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005175 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5176 group_entry)
5177 __perf_event_exit_task(child_event, child_ctx, child);
5178
5179 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005180 group_entry)
5181 __perf_event_exit_task(child_event, child_ctx, child);
5182
5183 /*
5184 * If the last event was a group event, it will have appended all
5185 * its siblings to the list, but we obtained 'tmp' before that which
5186 * will still point to the list head terminating the iteration.
5187 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005188 if (!list_empty(&child_ctx->pinned_groups) ||
5189 !list_empty(&child_ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005190 goto again;
5191
5192 mutex_unlock(&child_ctx->mutex);
5193
5194 put_ctx(child_ctx);
5195}
5196
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005197static void perf_free_event(struct perf_event *event,
5198 struct perf_event_context *ctx)
5199{
5200 struct perf_event *parent = event->parent;
5201
5202 if (WARN_ON_ONCE(!parent))
5203 return;
5204
5205 mutex_lock(&parent->child_mutex);
5206 list_del_init(&event->child_list);
5207 mutex_unlock(&parent->child_mutex);
5208
5209 fput(parent->filp);
5210
5211 list_del_event(event, ctx);
5212 free_event(event);
5213}
5214
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005215/*
5216 * free an unexposed, unused context as created by inheritance by
5217 * init_task below, used by fork() in case of fail.
5218 */
5219void perf_event_free_task(struct task_struct *task)
5220{
5221 struct perf_event_context *ctx = task->perf_event_ctxp;
5222 struct perf_event *event, *tmp;
5223
5224 if (!ctx)
5225 return;
5226
5227 mutex_lock(&ctx->mutex);
5228again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005229 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5230 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005231
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005232 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5233 group_entry)
5234 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005235
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005236 if (!list_empty(&ctx->pinned_groups) ||
5237 !list_empty(&ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005238 goto again;
5239
5240 mutex_unlock(&ctx->mutex);
5241
5242 put_ctx(ctx);
5243}
5244
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005245static int
5246inherit_task_group(struct perf_event *event, struct task_struct *parent,
5247 struct perf_event_context *parent_ctx,
5248 struct task_struct *child,
5249 int *inherited_all)
5250{
5251 int ret;
5252 struct perf_event_context *child_ctx = child->perf_event_ctxp;
5253
5254 if (!event->attr.inherit) {
5255 *inherited_all = 0;
5256 return 0;
5257 }
5258
5259 if (!child_ctx) {
5260 /*
5261 * This is executed from the parent task context, so
5262 * inherit events that have been marked for cloning.
5263 * First allocate and initialize a context for the
5264 * child.
5265 */
5266
5267 child_ctx = kzalloc(sizeof(struct perf_event_context),
5268 GFP_KERNEL);
5269 if (!child_ctx)
5270 return -ENOMEM;
5271
5272 __perf_event_init_context(child_ctx, child);
5273 child->perf_event_ctxp = child_ctx;
5274 get_task_struct(child);
5275 }
5276
5277 ret = inherit_group(event, parent, parent_ctx,
5278 child, child_ctx);
5279
5280 if (ret)
5281 *inherited_all = 0;
5282
5283 return ret;
5284}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_event_ctxp = NULL;

	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	if (likely(!parent->perf_event_ctxp))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
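
/*
 * Note on parent_ctx/parent_gen above: the context-switch path earlier
 * in this file (see context_equiv()) treats two task contexts as
 * equivalent clones, and can simply swap them instead of rescheduling
 * every event, when their parents and generations match; roughly:
 *
 *	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
 *		&& ctx1->parent_gen == ctx2->parent_gen
 *		&& !ctx1->pin_count && !ctx2->pin_count;
 */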

static void __init perf_event_init_all_cpus(void)
{
	int cpu;
	struct perf_cpu_context *cpuctx;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				 group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	/*
	 * Events must be torn down on the CPU that owns them, hence
	 * the cross-call to the outgoing CPU.
	 */
	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

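/*
 * Summary of the hotplug notifications handled below (behavior of
 * kernel/cpu.c, stated here for context): CPU_UP_PREPARE is delivered
 * before the new CPU starts running, and CPU_DOWN_PREPARE while the
 * outgoing CPU is still online, so the per-CPU context is set up
 * before events can be scheduled on the CPU and drained before the
 * CPU is gone.
 */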
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call = perf_cpu_notify,
	.priority = 20,
};

void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	/*
	 * The boot CPU came online before the notifier could be
	 * registered, so replay UP_PREPARE and ONLINE for it by hand.
	 */
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}

static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
					struct sysdev_class_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
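
/*
 * Worked example of the max_pertask update above (numbers are
 * illustrative): with perf_max_events = 64, a new reserve_percpu of 16
 * and a CPU already holding 10 events, mpt = min(64 - 10, 64 - 16) =
 * min(54, 48) = 48, so further per-task events on that CPU are capped
 * at 48.
 */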

static ssize_t perf_show_overcommit(struct sysdev_class *class,
				    struct sysdev_class_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class,
		    struct sysdev_class_attribute *attr,
		    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs = perfclass_attrs,
	.name = "perf_events",
};

static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
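
/*
 * Usage sketch (illustrative; the path assumes the standard
 * cpu_sysdev_class location under /sys/devices/system/cpu/): the
 * attribute group above exposes the two knobs as regular sysfs files,
 * so a userspace tool could flip overcommit with plain open/write:
 *
 *	int fd = open("/sys/devices/system/cpu/perf_events/overcommit",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */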