/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>

atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;
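/*
 * The level is a runtime tunable; e.g. (assuming the usual procfs mount):
 *
 *	echo 2 > /proc/sys/kernel/perf_event_paranoid
 */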

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
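/*
 * Note that perf_pmu_disable()/perf_pmu_enable() nest through the per-cpu
 * count above: only the first disable and the matching last enable reach
 * the PMU callbacks. E.g.:
 *
 *	perf_pmu_disable(pmu);		count 0->1, pmu->pmu_disable() runs
 *	perf_pmu_disable(pmu);		count 1->2, no callback
 *	perf_pmu_enable(pmu);		count 2->1, no callback
 *	perf_pmu_enable(pmu);		count 1->0, pmu->pmu_enable() runs
 */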

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}
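/*
 * Lifetime sketch: contexts are found through rcu_dereference(), get_ctx()
 * refuses to resurrect a zero refcount (inc_not_zero), and put_ctx() frees
 * through call_rcu(), so a reader that found the context under
 * rcu_read_lock() can safely inspect it even if the last reference is
 * dropped concurrently.
 */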

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
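/*
 * ctx->time thus advances lazily: callers fold in the wall-clock delta
 * since the last update (tracked in ctx->timestamp) at interesting points
 * such as scheduling and enable/disable, rather than on every counter
 * increment.
 */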

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}
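/*
 * Worked example (context times in ns, context still active): an event
 * enabled at ctx->time == 100 (tstamp_enabled = tstamp_running =
 * tstamp_stopped = 100) that gets scheduled in at ctx->time == 150
 * (tstamp_running += 150 - 100, see event_sched_in()) and is read at
 * ctx->time == 400 while still ACTIVE reports:
 *
 *	total_time_enabled = 400 - 100 = 300
 *	total_time_running = 400 - 150 = 250
 */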

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}
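/*
 * Pinned groups must be on the PMU the whole time their context is active
 * (failing that, they go into ERROR state); flexible groups are the ones
 * eligible for rotation/multiplexing. Keeping the two on separate lists
 * lets the sched-in code below service pinned groups first.
 */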

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand-alone event or group leader, we go on the context
	 * list; group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;
}
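/*
 * PERF_GROUP_SOFTWARE means "every event in this group is a software
 * event"; that is why attaching the first hardware sibling clears it on
 * the leader. group_can_go_on() uses the flag to let all-software groups
 * bypass the hardware scheduling constraints entirely.
 */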

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		return;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}

static inline int
event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}
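/*
 * I.e. a task-bound event (event->cpu == -1) matches on whatever cpu its
 * task runs on, while a cpu-bound event only counts while we are on that
 * cpu.
 */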

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = ctx->time;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	raw_spin_unlock(&ctx->lock);
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the event safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	event->shadow_ctx_time = ctx->time - ctx->timestamp;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
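/*
 * The transaction protocol above is what makes a group all-or-nothing:
 * ->start_txn() lets the PMU defer its schedulability checks, so each
 * ->add() may optimistically succeed, and ->commit_txn() is where the
 * group as a whole gets validated against the hardware. On any failure,
 * ->cancel_txn() throws the partial programming away and the timestamp
 * fixup above keeps the group's timings consistent.
 */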
755
756/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200757 * Work out whether we can put this event group on the CPU now.
758 */
759static int group_can_go_on(struct perf_event *event,
760 struct perf_cpu_context *cpuctx,
761 int can_add_hw)
762{
763 /*
764 * Groups consisting entirely of software events can always go on.
765 */
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +0100766 if (event->group_flags & PERF_GROUP_SOFTWARE)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200767 return 1;
768 /*
769 * If an exclusive group is already on, no other hardware
770 * events can go on.
771 */
772 if (cpuctx->exclusive)
773 return 0;
774 /*
775 * If this group is exclusive and there are already
776 * events on the CPU, it can't go on.
777 */
778 if (event->attr.exclusive && cpuctx->active_oncpu)
779 return 0;
780 /*
781 * Otherwise, try to add it if all previous groups were able
782 * to go on.
783 */
784 return can_add_hw;
785}
786
787static void add_event_to_ctx(struct perf_event *event,
788 struct perf_event_context *ctx)
789{
790 list_add_event(event, ctx);
Peter Zijlstra8a495422010-05-27 15:47:49 +0200791 perf_group_attach(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200792 event->tstamp_enabled = ctx->time;
793 event->tstamp_running = ctx->time;
794 event->tstamp_stopped = ctx->time;
795}
796
797/*
798 * Cross CPU call to install and enable a performance event
799 *
800 * Must be called with ctx->mutex held
801 */
802static void __perf_install_in_context(void *info)
803{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200804 struct perf_event *event = info;
805 struct perf_event_context *ctx = event->ctx;
806 struct perf_event *leader = event->group_leader;
Peter Zijlstra108b02c2010-09-06 14:32:03 +0200807 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200808 int err;
809
810 /*
811 * If this is a task context, we need to check whether it is
812 * the current task context of this cpu. If not it has been
813 * scheduled out before the smp call arrived.
814 * Or possibly this is the right context but it isn't
815 * on this cpu because it had no events.
816 */
817 if (ctx->task && cpuctx->task_ctx != ctx) {
818 if (cpuctx->task_ctx || ctx->task != current)
819 return;
820 cpuctx->task_ctx = ctx;
821 }
822
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100823 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200824 ctx->is_active = 1;
825 update_context_time(ctx);
826
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200827 add_event_to_ctx(event, ctx);
828
Peter Zijlstraf4c41762009-12-16 17:55:54 +0100829 if (event->cpu != -1 && event->cpu != smp_processor_id())
830 goto unlock;
831
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200832 /*
833 * Don't put the event on if it is disabled or if
834 * it is in a group and the group isn't on.
835 */
836 if (event->state != PERF_EVENT_STATE_INACTIVE ||
837 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
838 goto unlock;
839
840 /*
841 * An exclusive event can't go on if there are already active
842 * hardware events, and no hardware event can go on if there
843 * is already an exclusive event on.
844 */
845 if (!group_can_go_on(event, cpuctx, 1))
846 err = -EEXIST;
847 else
Peter Zijlstra6e377382010-02-11 13:21:58 +0100848 err = event_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200849
850 if (err) {
851 /*
852 * This event couldn't go on. If it is in a group
853 * then we have to pull the whole group off.
854 * If the event group is pinned then put it in error state.
855 */
856 if (leader != event)
857 group_sched_out(leader, cpuctx, ctx);
858 if (leader->attr.pinned) {
859 update_group_times(leader);
860 leader->state = PERF_EVENT_STATE_ERROR;
861 }
862 }
863
Peter Zijlstra9ed60602010-06-11 17:36:35 +0200864unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100865 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200866}
867
868/*
869 * Attach a performance event to a context
870 *
871 * First we add the event to the list with the hardware enable bit
872 * in event->hw_config cleared.
873 *
874 * If the event is attached to a task which is on a CPU we use a smp
875 * call to enable it in the task context. The task might have been
876 * scheduled away, but we check this in the smp call again.
877 *
878 * Must be called with ctx->mutex held.
879 */
880static void
881perf_install_in_context(struct perf_event_context *ctx,
882 struct perf_event *event,
883 int cpu)
884{
885 struct task_struct *task = ctx->task;
886
Peter Zijlstrac3f00c72010-08-18 14:37:15 +0200887 event->ctx = ctx;
888
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200889 if (!task) {
890 /*
891 * Per cpu events are installed via an smp call and
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200892 * the install is always successful.
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200893 */
894 smp_call_function_single(cpu, __perf_install_in_context,
895 event, 1);
896 return;
897 }
898
899retry:
900 task_oncpu_function_call(task, __perf_install_in_context,
901 event);
902
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100903 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200904 /*
905 * we need to retry the smp call.
906 */
907 if (ctx->is_active && list_empty(&event->group_entry)) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100908 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200909 goto retry;
910 }
911
912 /*
913 * The lock prevents that this context is scheduled in so we
914 * can add the event safely, if it the call above did not
915 * succeed.
916 */
917 if (list_empty(&event->group_entry))
918 add_event_to_ctx(event, ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100919 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200920}
921
922/*
923 * Put a event into inactive state and update time fields.
924 * Enabling the leader of a group effectively enables all
925 * the group members that aren't explicitly disabled, so we
926 * have to update their ->tstamp_enabled also.
927 * Note: this works for group members as well as group leaders
928 * since the non-leader members' sibling_lists will be empty.
929 */
930static void __perf_event_mark_enabled(struct perf_event *event,
931 struct perf_event_context *ctx)
932{
933 struct perf_event *sub;
934
935 event->state = PERF_EVENT_STATE_INACTIVE;
936 event->tstamp_enabled = ctx->time - event->total_time_enabled;
Peter Zijlstra9ed60602010-06-11 17:36:35 +0200937 list_for_each_entry(sub, &event->sibling_list, group_entry) {
938 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200939 sub->tstamp_enabled =
940 ctx->time - sub->total_time_enabled;
Peter Zijlstra9ed60602010-06-11 17:36:35 +0200941 }
942 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200943}
944
945/*
946 * Cross CPU call to enable a performance event
947 */
948static void __perf_event_enable(void *info)
949{
950 struct perf_event *event = info;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200951 struct perf_event_context *ctx = event->ctx;
952 struct perf_event *leader = event->group_leader;
Peter Zijlstra108b02c2010-09-06 14:32:03 +0200953 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200954 int err;
955
956 /*
957 * If this is a per-task event, need to check whether this
958 * event's task is the current task on this cpu.
959 */
960 if (ctx->task && cpuctx->task_ctx != ctx) {
961 if (cpuctx->task_ctx || ctx->task != current)
962 return;
963 cpuctx->task_ctx = ctx;
964 }
965
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100966 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200967 ctx->is_active = 1;
968 update_context_time(ctx);
969
970 if (event->state >= PERF_EVENT_STATE_INACTIVE)
971 goto unlock;
972 __perf_event_mark_enabled(event, ctx);
973
Peter Zijlstraf4c41762009-12-16 17:55:54 +0100974 if (event->cpu != -1 && event->cpu != smp_processor_id())
975 goto unlock;
976
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200977 /*
978 * If the event is in a group and isn't the group leader,
979 * then don't put it on unless the group is on.
980 */
981 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
982 goto unlock;
983
984 if (!group_can_go_on(event, cpuctx, 1)) {
985 err = -EEXIST;
986 } else {
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200987 if (event == leader)
Peter Zijlstra6e377382010-02-11 13:21:58 +0100988 err = group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200989 else
Peter Zijlstra6e377382010-02-11 13:21:58 +0100990 err = event_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200991 }
992
993 if (err) {
994 /*
995 * If this event can't go on and it's part of a
996 * group, then the whole group has to come off.
997 */
998 if (leader != event)
999 group_sched_out(leader, cpuctx, ctx);
1000 if (leader->attr.pinned) {
1001 update_group_times(leader);
1002 leader->state = PERF_EVENT_STATE_ERROR;
1003 }
1004 }
1005
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001006unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001007 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001008}
1009
1010/*
1011 * Enable a event.
1012 *
1013 * If event->ctx is a cloned context, callers must make sure that
1014 * every task struct that event->ctx->task could possibly point to
1015 * remains valid. This condition is satisfied when called through
1016 * perf_event_for_each_child or perf_event_for_each as described
1017 * for perf_event_disable.
1018 */
Frederic Weisbecker44234ad2009-12-09 09:25:48 +01001019void perf_event_enable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001020{
1021 struct perf_event_context *ctx = event->ctx;
1022 struct task_struct *task = ctx->task;
1023
1024 if (!task) {
1025 /*
1026 * Enable the event on the cpu that it's on
1027 */
1028 smp_call_function_single(event->cpu, __perf_event_enable,
1029 event, 1);
1030 return;
1031 }
1032
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001033 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001034 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1035 goto out;
1036
1037 /*
1038 * If the event is in error state, clear that first.
1039 * That way, if we see the event in error state below, we
1040 * know that it has gone back into error state, as distinct
1041 * from the task having been scheduled away before the
1042 * cross-call arrived.
1043 */
1044 if (event->state == PERF_EVENT_STATE_ERROR)
1045 event->state = PERF_EVENT_STATE_OFF;
1046
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001047retry:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001048 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001049 task_oncpu_function_call(task, __perf_event_enable, event);
1050
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001051 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001052
1053 /*
1054 * If the context is active and the event is still off,
1055 * we need to retry the cross-call.
1056 */
1057 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1058 goto retry;
1059
1060 /*
1061 * Since we have the lock this context can't be scheduled
1062 * in, so we can change the state safely.
1063 */
1064 if (event->state == PERF_EVENT_STATE_OFF)
1065 __perf_event_mark_enabled(event, ctx);
1066
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001067out:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001068 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001069}
1070
1071static int perf_event_refresh(struct perf_event *event, int refresh)
1072{
1073 /*
1074 * not supported on inherited events
1075 */
1076 if (event->attr.inherit)
1077 return -EINVAL;
1078
1079 atomic_add(refresh, &event->event_limit);
1080 perf_event_enable(event);
1081
1082 return 0;
1083}
1084
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001085enum event_type_t {
1086 EVENT_FLEXIBLE = 0x1,
1087 EVENT_PINNED = 0x2,
1088 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1089};
1090
1091static void ctx_sched_out(struct perf_event_context *ctx,
1092 struct perf_cpu_context *cpuctx,
1093 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001094{
1095 struct perf_event *event;
1096
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001097 raw_spin_lock(&ctx->lock);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001098 perf_pmu_disable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001099 ctx->is_active = 0;
1100 if (likely(!ctx->nr_events))
1101 goto out;
1102 update_context_time(ctx);
1103
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001104 if (!ctx->nr_active)
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001105 goto out;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001106
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001107 if (event_type & EVENT_PINNED) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001108 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1109 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001110 }
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001111
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001112 if (event_type & EVENT_FLEXIBLE) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001113 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001114 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001115 }
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001116out:
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001117 perf_pmu_enable(ctx->pmu);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001118 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001119}
1120
1121/*
1122 * Test whether two contexts are equivalent, i.e. whether they
1123 * have both been cloned from the same version of the same context
1124 * and they both have the same number of enabled events.
1125 * If the number of enabled events is the same, then the set
1126 * of enabled events should be the same, because these are both
1127 * inherited contexts, therefore we can't access individual events
1128 * in them directly with an fd; we can only enable/disable all
1129 * events via prctl, or enable/disable all events in a family
1130 * via ioctl, which will have the same effect on both contexts.
1131 */
1132static int context_equiv(struct perf_event_context *ctx1,
1133 struct perf_event_context *ctx2)
1134{
1135 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1136 && ctx1->parent_gen == ctx2->parent_gen
1137 && !ctx1->pin_count && !ctx2->pin_count;
1138}
1139
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001140static void __perf_event_sync_stat(struct perf_event *event,
1141 struct perf_event *next_event)
1142{
1143 u64 value;
1144
1145 if (!event->attr.inherit_stat)
1146 return;
1147
1148 /*
1149 * Update the event value, we cannot use perf_event_read()
1150 * because we're in the middle of a context switch and have IRQs
1151 * disabled, which upsets smp_call_function_single(), however
1152 * we know the event must be on the current CPU, therefore we
1153 * don't need to use it.
1154 */
1155 switch (event->state) {
1156 case PERF_EVENT_STATE_ACTIVE:
Peter Zijlstra3dbebf12009-11-20 22:19:52 +01001157 event->pmu->read(event);
1158 /* fall-through */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001159
1160 case PERF_EVENT_STATE_INACTIVE:
1161 update_event_times(event);
1162 break;
1163
1164 default:
1165 break;
1166 }
1167
1168 /*
1169 * In order to keep per-task stats reliable we need to flip the event
1170 * values when we flip the contexts.
1171 */
Peter Zijlstrae7850592010-05-21 14:43:08 +02001172 value = local64_read(&next_event->count);
1173 value = local64_xchg(&event->count, value);
1174 local64_set(&next_event->count, value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001175
1176 swap(event->total_time_enabled, next_event->total_time_enabled);
1177 swap(event->total_time_running, next_event->total_time_running);
1178
1179 /*
1180 * Since we swizzled the values, update the user visible data too.
1181 */
1182 perf_event_update_userpage(event);
1183 perf_event_update_userpage(next_event);
1184}
1185
1186#define list_next_entry(pos, member) \
1187 list_entry(pos->member.next, typeof(*pos), member)
1188
1189static void perf_event_sync_stat(struct perf_event_context *ctx,
1190 struct perf_event_context *next_ctx)
1191{
1192 struct perf_event *event, *next_event;
1193
1194 if (!ctx->nr_stat)
1195 return;
1196
Peter Zijlstra02ffdbc2009-11-20 22:19:50 +01001197 update_context_time(ctx);
1198
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001199 event = list_first_entry(&ctx->event_list,
1200 struct perf_event, event_entry);
1201
1202 next_event = list_first_entry(&next_ctx->event_list,
1203 struct perf_event, event_entry);
1204
1205 while (&event->event_entry != &ctx->event_list &&
1206 &next_event->event_entry != &next_ctx->event_list) {
1207
1208 __perf_event_sync_stat(event, next_event);
1209
1210 event = list_next_entry(event, event_entry);
1211 next_event = list_next_entry(next_event, event_entry);
1212 }
1213}
1214
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001215void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1216 struct task_struct *next)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001217{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001218 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001219 struct perf_event_context *next_ctx;
1220 struct perf_event_context *parent;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001221 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001222 int do_switch = 1;
1223
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001224 if (likely(!ctx))
1225 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001226
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001227 cpuctx = __get_cpu_context(ctx);
1228 if (!cpuctx->task_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001229 return;
1230
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001231 rcu_read_lock();
1232 parent = rcu_dereference(ctx->parent_ctx);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001233 next_ctx = next->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001234 if (parent && next_ctx &&
1235 rcu_dereference(next_ctx->parent_ctx) == parent) {
1236 /*
1237 * Looks like the two contexts are clones, so we might be
1238 * able to optimize the context switch. We lock both
1239 * contexts and check that they are clones under the
1240 * lock (including re-checking that neither has been
1241 * uncloned in the meantime). It doesn't matter which
1242 * order we take the locks because no other cpu could
1243 * be trying to lock both of these tasks.
1244 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001245 raw_spin_lock(&ctx->lock);
1246 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001247 if (context_equiv(ctx, next_ctx)) {
1248 /*
1249 * XXX do we need a memory barrier of sorts
1250 * wrt to rcu_dereference() of perf_event_ctxp
1251 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001252 task->perf_event_ctxp[ctxn] = next_ctx;
1253 next->perf_event_ctxp[ctxn] = ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001254 ctx->task = next;
1255 next_ctx->task = task;
1256 do_switch = 0;
1257
1258 perf_event_sync_stat(ctx, next_ctx);
1259 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001260 raw_spin_unlock(&next_ctx->lock);
1261 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001262 }
1263 rcu_read_unlock();
1264
1265 if (do_switch) {
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001266 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001267 cpuctx->task_ctx = NULL;
1268 }
1269}
1270
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001271#define for_each_task_context_nr(ctxn) \
1272 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
1273
1274/*
1275 * Called from scheduler to remove the events of the current task,
1276 * with interrupts disabled.
1277 *
1278 * We stop each event and update the event value in event->count.
1279 *
1280 * This does not protect us against NMI, but disable()
1281 * sets the disabled bit in the control field of event _before_
1282 * accessing the event control register. If a NMI hits, then it will
1283 * not restart the event.
1284 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02001285void __perf_event_task_sched_out(struct task_struct *task,
1286 struct task_struct *next)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001287{
1288 int ctxn;
1289
1290 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1291
1292 for_each_task_context_nr(ctxn)
1293 perf_event_context_sched_out(task, ctxn, next);
1294}
1295
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001296static void task_ctx_sched_out(struct perf_event_context *ctx,
1297 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001298{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001299 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001300
1301 if (!cpuctx->task_ctx)
1302 return;
1303
1304 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1305 return;
1306
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001307 ctx_sched_out(ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001308 cpuctx->task_ctx = NULL;
1309}
1310
1311/*
1312 * Called with IRQs disabled
1313 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001314static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1315 enum event_type_t event_type)
1316{
1317 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001318}
1319
1320static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001321ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001322 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001323{
1324 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001325
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001326 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1327 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001328 continue;
Peter Zijlstra6e377382010-02-11 13:21:58 +01001329 if (event->cpu != -1 && event->cpu != smp_processor_id())
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001330 continue;
1331
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001332 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01001333 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001334
1335 /*
1336 * If this pinned group hasn't been scheduled,
1337 * put it in error state.
1338 */
1339 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1340 update_group_times(event);
1341 event->state = PERF_EVENT_STATE_ERROR;
1342 }
1343 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001344}
1345
1346static void
1347ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001348 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001349{
1350 struct perf_event *event;
1351 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001352
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001353 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1354 /* Ignore events in OFF or ERROR state */
1355 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001356 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001357 /*
1358 * Listen to the 'cpu' scheduling filter constraint
1359 * of events:
1360 */
Peter Zijlstra6e377382010-02-11 13:21:58 +01001361 if (event->cpu != -1 && event->cpu != smp_processor_id())
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001362 continue;
1363
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001364 if (group_can_go_on(event, cpuctx, can_add_hw)) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001365 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001366 can_add_hw = 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001367 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001368 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001369}
1370
1371static void
1372ctx_sched_in(struct perf_event_context *ctx,
1373 struct perf_cpu_context *cpuctx,
1374 enum event_type_t event_type)
1375{
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001376 raw_spin_lock(&ctx->lock);
1377 ctx->is_active = 1;
1378 if (likely(!ctx->nr_events))
1379 goto out;
1380
1381 ctx->timestamp = perf_clock();
1382
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001383 /*
1384 * First go through the list and put on any pinned groups
1385 * in order to give them the best chance of going on.
1386 */
1387 if (event_type & EVENT_PINNED)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001388 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001389
1390 /* Then walk through the lower prio flexible groups */
1391 if (event_type & EVENT_FLEXIBLE)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001392 ctx_flexible_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001393
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001394out:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001395 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001396}
1397
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001398static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1399 enum event_type_t event_type)
1400{
1401 struct perf_event_context *ctx = &cpuctx->ctx;
1402
1403 ctx_sched_in(ctx, cpuctx, event_type);
1404}
1405
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001406static void task_ctx_sched_in(struct perf_event_context *ctx,
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001407 enum event_type_t event_type)
1408{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001409 struct perf_cpu_context *cpuctx;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001410
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001411 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001412 if (cpuctx->task_ctx == ctx)
1413 return;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001414
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001415 ctx_sched_in(ctx, cpuctx, event_type);
1416 cpuctx->task_ctx = ctx;
1417}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001418
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001419void perf_event_context_sched_in(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001420{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001421 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001422
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001423 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001424 if (cpuctx->task_ctx == ctx)
1425 return;
1426
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001427 perf_pmu_disable(ctx->pmu);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001428 /*
1429 * We want to keep the following priority order:
1430 * cpu pinned events (which don't need to move), task pinned,
1431 * cpu flexible, task flexible.
1432 */
1433 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1434
1435 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1436 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1437 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1438
1439 cpuctx->task_ctx = ctx;
eranian@google.com9b33fa62010-03-10 22:26:05 -08001440
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001441 /*
1442 * Since these rotations are per-cpu, we need to ensure the
1443 * cpu-context we got scheduled on is actually rotating.
1444 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001445 perf_pmu_rotate_start(ctx->pmu);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001446 perf_pmu_enable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001447}
1448
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001449/*
1450 * Called from scheduler to add the events of the current task
1451 * with interrupts disabled.
1452 *
1453 * We restore the event value and then enable it.
1454 *
1455 * This does not protect us against NMI, but enable()
1456 * sets the enabled bit in the event's control field _before_
1457 * accessing the event control register. If an NMI hits, it will
1458 * keep the event running.
1459 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02001460void __perf_event_task_sched_in(struct task_struct *task)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001461{
1462 struct perf_event_context *ctx;
1463 int ctxn;
1464
1465 for_each_task_context_nr(ctxn) {
1466 ctx = task->perf_event_ctxp[ctxn];
1467 if (likely(!ctx))
1468 continue;
1469
1470 perf_event_context_sched_in(ctx);
1471 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001472}
1473
1474#define MAX_INTERRUPTS (~0ULL)
1475
1476static void perf_log_throttle(struct perf_event *event, int enable);
1477
Peter Zijlstraabd50712010-01-26 18:50:16 +01001478static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1479{
1480 u64 frequency = event->attr.sample_freq;
1481 u64 sec = NSEC_PER_SEC;
1482 u64 divisor, dividend;
1483
1484 int count_fls, nsec_fls, frequency_fls, sec_fls;
1485
1486 count_fls = fls64(count);
1487 nsec_fls = fls64(nsec);
1488 frequency_fls = fls64(frequency);
1489 sec_fls = 30;
1490
1491 /*
1492 * We got @count in @nsec; with a target of sample_freq Hz,
1493 * the target period becomes:
1494 *
1495 * @count * 10^9
1496 * period = -------------------
1497 * @nsec * sample_freq
1498 *
1499 */
1500
1501 /*
1502 * Reduce accuracy by one bit such that @a and @b converge
1503 * to a similar magnitude.
1504 */
1505#define REDUCE_FLS(a, b) \
1506do { \
1507 if (a##_fls > b##_fls) { \
1508 a >>= 1; \
1509 a##_fls--; \
1510 } else { \
1511 b >>= 1; \
1512 b##_fls--; \
1513 } \
1514} while (0)
1515
1516 /*
1517 * Reduce accuracy until either term fits in a u64, then proceed with
1518 * the other, so that finally we can do a u64/u64 division.
1519 */
1520 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1521 REDUCE_FLS(nsec, frequency);
1522 REDUCE_FLS(sec, count);
1523 }
1524
1525 if (count_fls + sec_fls > 64) {
1526 divisor = nsec * frequency;
1527
1528 while (count_fls + sec_fls > 64) {
1529 REDUCE_FLS(count, sec);
1530 divisor >>= 1;
1531 }
1532
1533 dividend = count * sec;
1534 } else {
1535 dividend = count * sec;
1536
1537 while (nsec_fls + frequency_fls > 64) {
1538 REDUCE_FLS(nsec, frequency);
1539 dividend >>= 1;
1540 }
1541
1542 divisor = nsec * frequency;
1543 }
1544
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02001545 if (!divisor)
1546 return dividend;
1547
Peter Zijlstraabd50712010-01-26 18:50:16 +01001548 return div64_u64(dividend, divisor);
1549}
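/*
 * Illustrative sketch, not part of this file: stripped of the REDUCE_FLS()
 * overflow dance, the computation above is just
 * period = (count * 10^9) / (nsec * sample_freq).  The check below assumes
 * a host compiler with 128-bit integer support; the numbers are made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t naive_period(uint64_t count, uint64_t nsec, uint64_t freq)
{
        /* count events were seen in nsec ns; aim for freq samples/sec */
        unsigned __int128 dividend = (unsigned __int128)count * 1000000000ULL;
        unsigned __int128 divisor = (unsigned __int128)nsec * freq;

        return divisor ? (uint64_t)(dividend / divisor) : (uint64_t)dividend;
}

int main(void)
{
        /* 20M cycles in 10ms (a 2 GHz CPU) at 1000 Hz -> sample every 2M cycles */
        printf("%llu\n", (unsigned long long)
               naive_period(20000000ULL, 10000000ULL, 1000ULL));
        return 0;
}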
1550
1551static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001552{
1553 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02001554 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001555 s64 delta;
1556
Peter Zijlstraabd50712010-01-26 18:50:16 +01001557 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001558
1559 delta = (s64)(period - hwc->sample_period);
1560 delta = (delta + 7) / 8; /* low pass filter */
1561
1562 sample_period = hwc->sample_period + delta;
1563
1564 if (!sample_period)
1565 sample_period = 1;
1566
1567 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001568
Peter Zijlstrae7850592010-05-21 14:43:08 +02001569 if (local64_read(&hwc->period_left) > 8*sample_period) {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001570 event->pmu->stop(event, PERF_EF_UPDATE);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001571 local64_set(&hwc->period_left, 0);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001572 event->pmu->start(event, PERF_EF_RELOAD);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001573 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001574}
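/*
 * Illustrative sketch, not part of this file: the (delta + 7) / 8 low-pass
 * filter above moves sample_period only an eighth of the remaining error
 * toward the freshly computed target each tick, so a single noisy
 * measurement cannot yank the period around.  Starting values are arbitrary.
 */
#include <stdio.h>

int main(void)
{
        long long sample_period = 1000000, target = 2000000;
        int tick;

        for (tick = 0; tick < 10; tick++) {
                long long delta = (target - sample_period + 7) / 8;

                sample_period += delta;
                printf("tick %d: period %lld\n", tick, sample_period);
        }
        return 0;       /* creeps toward 2000000 over a few dozen ticks */
}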
1575
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001576static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001577{
1578 struct perf_event *event;
1579 struct hw_perf_event *hwc;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001580 u64 interrupts, now;
1581 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001582
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001583 raw_spin_lock(&ctx->lock);
Paul Mackerras03541f82009-10-14 16:58:03 +11001584 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001585 if (event->state != PERF_EVENT_STATE_ACTIVE)
1586 continue;
1587
Peter Zijlstra5d27c232009-12-17 13:16:32 +01001588 if (event->cpu != -1 && event->cpu != smp_processor_id())
1589 continue;
1590
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001591 hwc = &event->hw;
1592
1593 interrupts = hwc->interrupts;
1594 hwc->interrupts = 0;
1595
1596 /*
1597 * unthrottle events on the tick
1598 */
1599 if (interrupts == MAX_INTERRUPTS) {
1600 perf_log_throttle(event, 1);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001601 event->pmu->start(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001602 }
1603
1604 if (!event->attr.freq || !event->attr.sample_freq)
1605 continue;
1606
Peter Zijlstraabd50712010-01-26 18:50:16 +01001607 event->pmu->read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001608 now = local64_read(&event->count);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001609 delta = now - hwc->freq_count_stamp;
1610 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001611
Peter Zijlstraabd50712010-01-26 18:50:16 +01001612 if (delta > 0)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001613 perf_adjust_period(event, period, delta);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001614 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001615 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001616}
1617
1618/*
1619 * Round-robin a context's events:
1620 */
1621static void rotate_ctx(struct perf_event_context *ctx)
1622{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001623 raw_spin_lock(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001624
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001625	/* Rotate the first entry of the non-pinned groups to the end */
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001626 list_rotate_left(&ctx->flexible_groups);
1627
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001628 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001629}
1630
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001631/*
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001632 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
1633 * because they're strictly CPU-affine and rotate_start is called with IRQs
1634 * disabled, while rotate_context is called from IRQ context.
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001635 */
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001636static void perf_rotate_context(struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001637{
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001638 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001639 struct perf_event_context *ctx = NULL;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001640 int rotate = 0, remove = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001641
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001642 if (cpuctx->ctx.nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001643 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001644 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1645 rotate = 1;
1646 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001647
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001648 ctx = cpuctx->task_ctx;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001649 if (ctx && ctx->nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001650 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001651 if (ctx->nr_events != ctx->nr_active)
1652 rotate = 1;
1653 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001654
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001655 perf_pmu_disable(cpuctx->ctx.pmu);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001656 perf_ctx_adjust_freq(&cpuctx->ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001657 if (ctx)
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001658 perf_ctx_adjust_freq(ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001659
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001660 if (!rotate)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001661 goto done;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001662
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001663 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001664 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001665 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001666
1667 rotate_ctx(&cpuctx->ctx);
1668 if (ctx)
1669 rotate_ctx(ctx);
1670
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001671 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001672 if (ctx)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001673 task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001674
1675done:
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001676 if (remove)
1677 list_del_init(&cpuctx->rotation_list);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001678
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001679 perf_pmu_enable(cpuctx->ctx.pmu);
1680}
1681
1682void perf_event_task_tick(void)
1683{
1684 struct list_head *head = &__get_cpu_var(rotation_list);
1685 struct perf_cpu_context *cpuctx, *tmp;
1686
1687 WARN_ON(!irqs_disabled());
1688
1689 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
1690 if (cpuctx->jiffies_interval == 1 ||
1691 !(jiffies % cpuctx->jiffies_interval))
1692 perf_rotate_context(cpuctx);
1693 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001694}
1695
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001696static int event_enable_on_exec(struct perf_event *event,
1697 struct perf_event_context *ctx)
1698{
1699 if (!event->attr.enable_on_exec)
1700 return 0;
1701
1702 event->attr.enable_on_exec = 0;
1703 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1704 return 0;
1705
1706 __perf_event_mark_enabled(event, ctx);
1707
1708 return 1;
1709}
1710
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001711/*
1712 * Enable all of a task's events that have been marked enable-on-exec.
1713 * This expects task == current.
1714 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001715static void perf_event_enable_on_exec(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001716{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001717 struct perf_event *event;
1718 unsigned long flags;
1719 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001720 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001721
1722 local_irq_save(flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001723 if (!ctx || !ctx->nr_events)
1724 goto out;
1725
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001726 task_ctx_sched_out(ctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001727
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001728 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001729
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001730 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1731 ret = event_enable_on_exec(event, ctx);
1732 if (ret)
1733 enabled = 1;
1734 }
1735
1736 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1737 ret = event_enable_on_exec(event, ctx);
1738 if (ret)
1739 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001740 }
1741
1742 /*
1743 * Unclone this context if we enabled any event.
1744 */
1745 if (enabled)
1746 unclone_ctx(ctx);
1747
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001748 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001749
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001750 perf_event_context_sched_in(ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001751out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001752 local_irq_restore(flags);
1753}
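/*
 * Illustrative sketch, not part of this file: attr.enable_on_exec is what
 * lets a tool open a disabled event on itself, exec the workload, and have
 * the kernel flip the event on at exec time, so the tool's own setup is
 * not counted.  Error handling is omitted; a real tool would keep the fd
 * (typically in a parent process) to read the count afterwards.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static void count_from_exec(char **argv)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;              /* start off ... */
        attr.enable_on_exec = 1;        /* ... until the exec below */

        /* pid 0, cpu -1: this task, any CPU */
        syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        execvp(argv[0], argv);
}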
1754
1755/*
1756 * Cross CPU call to read the hardware event
1757 */
1758static void __perf_event_read(void *info)
1759{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001760 struct perf_event *event = info;
1761 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001762 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001763
1764 /*
1765 * If this is a task context, we need to check whether it is
1766 * the current task context of this CPU. If not, it has been
1767 * scheduled out before the smp call arrived. In that case
1768 * event->count would have been updated to a recent sample
1769 * when the event was scheduled out.
1770 */
1771 if (ctx->task && cpuctx->task_ctx != ctx)
1772 return;
1773
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001774 raw_spin_lock(&ctx->lock);
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001775 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001776 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001777 raw_spin_unlock(&ctx->lock);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001778
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001779 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001780}
1781
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001782static inline u64 perf_event_count(struct perf_event *event)
1783{
Peter Zijlstrae7850592010-05-21 14:43:08 +02001784 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001785}
1786
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001787static u64 perf_event_read(struct perf_event *event)
1788{
1789 /*
1790 * If event is enabled and currently active on a CPU, update the
1791 * value in the event structure:
1792 */
1793 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1794 smp_call_function_single(event->oncpu,
1795 __perf_event_read, event, 1);
1796 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001797 struct perf_event_context *ctx = event->ctx;
1798 unsigned long flags;
1799
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001800 raw_spin_lock_irqsave(&ctx->lock, flags);
Stephane Eranianc530ccd2010-10-15 15:26:01 +02001801 /*
1802 * We may read while the context is not active
1803 * (e.g., the thread is blocked); in that case
1804 * we cannot update the context time.
1805 */
1806 if (ctx->is_active)
1807 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001808 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001809 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001810 }
1811
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001812 return perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001813}
1814
1815/*
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001816 * Callchain support
1817 */
1818
1819struct callchain_cpus_entries {
1820 struct rcu_head rcu_head;
1821 struct perf_callchain_entry *cpu_entries[0];
1822};
1823
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02001824static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001825static atomic_t nr_callchain_events;
1826static DEFINE_MUTEX(callchain_mutex);
1827struct callchain_cpus_entries *callchain_cpus_entries;
1828
1829
1830__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
1831 struct pt_regs *regs)
1832{
1833}
1834
1835__weak void perf_callchain_user(struct perf_callchain_entry *entry,
1836 struct pt_regs *regs)
1837{
1838}
1839
1840static void release_callchain_buffers_rcu(struct rcu_head *head)
1841{
1842 struct callchain_cpus_entries *entries;
1843 int cpu;
1844
1845 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
1846
1847 for_each_possible_cpu(cpu)
1848 kfree(entries->cpu_entries[cpu]);
1849
1850 kfree(entries);
1851}
1852
1853static void release_callchain_buffers(void)
1854{
1855 struct callchain_cpus_entries *entries;
1856
1857 entries = callchain_cpus_entries;
1858 rcu_assign_pointer(callchain_cpus_entries, NULL);
1859 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
1860}
1861
1862static int alloc_callchain_buffers(void)
1863{
1864 int cpu;
1865 int size;
1866 struct callchain_cpus_entries *entries;
1867
1868 /*
1869 * We can't use the percpu allocation API for data that can be
1870 * accessed from NMI. Use a temporary manual per-CPU allocation
1871 * until that gets sorted out.
1872 */
1873 size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
1874 num_possible_cpus();
1875
1876 entries = kzalloc(size, GFP_KERNEL);
1877 if (!entries)
1878 return -ENOMEM;
1879
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02001880 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001881
1882 for_each_possible_cpu(cpu) {
1883 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
1884 cpu_to_node(cpu));
1885 if (!entries->cpu_entries[cpu])
1886 goto fail;
1887 }
1888
1889 rcu_assign_pointer(callchain_cpus_entries, entries);
1890
1891 return 0;
1892
1893fail:
1894 for_each_possible_cpu(cpu)
1895 kfree(entries->cpu_entries[cpu]);
1896 kfree(entries);
1897
1898 return -ENOMEM;
1899}
1900
1901static int get_callchain_buffers(void)
1902{
1903 int err = 0;
1904 int count;
1905
1906 mutex_lock(&callchain_mutex);
1907
1908 count = atomic_inc_return(&nr_callchain_events);
1909 if (WARN_ON_ONCE(count < 1)) {
1910 err = -EINVAL;
1911 goto exit;
1912 }
1913
1914 if (count > 1) {
1915 /* If the allocation failed, give up */
1916 if (!callchain_cpus_entries)
1917 err = -ENOMEM;
1918 goto exit;
1919 }
1920
1921 err = alloc_callchain_buffers();
1922 if (err)
1923 release_callchain_buffers();
1924exit:
1925 mutex_unlock(&callchain_mutex);
1926
1927 return err;
1928}
1929
1930static void put_callchain_buffers(void)
1931{
1932 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
1933 release_callchain_buffers();
1934 mutex_unlock(&callchain_mutex);
1935 }
1936}
1937
1938static int get_recursion_context(int *recursion)
1939{
1940 int rctx;
1941
1942 if (in_nmi())
1943 rctx = 3;
1944 else if (in_irq())
1945 rctx = 2;
1946 else if (in_softirq())
1947 rctx = 1;
1948 else
1949 rctx = 0;
1950
1951 if (recursion[rctx])
1952 return -1;
1953
1954 recursion[rctx]++;
1955 barrier();
1956
1957 return rctx;
1958}
1959
1960static inline void put_recursion_context(int *recursion, int rctx)
1961{
1962 barrier();
1963 recursion[rctx]--;
1964}
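/*
 * Illustrative sketch, not part of this file: the intended calling pattern
 * for the two helpers above.  One slot per context (task, softirq, irq,
 * nmi) keeps an NMI that interrupts a softirq-time unwind from reusing
 * the buffer that unwind is still filling.
 */
static int example_recursion[PERF_NR_CONTEXTS]; /* stand-in for the per-cpu array */

static void example_unwind(void)
{
        int rctx = get_recursion_context(example_recursion);

        if (rctx < 0)
                return;         /* already unwinding in this context */

        /* ... fill the per-context callchain buffer ... */

        put_recursion_context(example_recursion, rctx);
}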
1965
1966static struct perf_callchain_entry *get_callchain_entry(int *rctx)
1967{
1968 int cpu;
1969 struct callchain_cpus_entries *entries;
1970
1971 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
1972 if (*rctx == -1)
1973 return NULL;
1974
1975 entries = rcu_dereference(callchain_cpus_entries);
1976 if (!entries)
1977 return NULL;
1978
1979 cpu = smp_processor_id();
1980
1981 return &entries->cpu_entries[cpu][*rctx];
1982}
1983
1984static void
1985put_callchain_entry(int rctx)
1986{
1987 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
1988}
1989
1990static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1991{
1992 int rctx;
1993 struct perf_callchain_entry *entry;
1994
1995
1996 entry = get_callchain_entry(&rctx);
1997 if (rctx == -1)
1998 return NULL;
1999
2000 if (!entry)
2001 goto exit_put;
2002
2003 entry->nr = 0;
2004
2005 if (!user_mode(regs)) {
2006 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
2007 perf_callchain_kernel(entry, regs);
2008 if (current->mm)
2009 regs = task_pt_regs(current);
2010 else
2011 regs = NULL;
2012 }
2013
2014 if (regs) {
2015 perf_callchain_store(entry, PERF_CONTEXT_USER);
2016 perf_callchain_user(entry, regs);
2017 }
2018
2019exit_put:
2020 put_callchain_entry(rctx);
2021
2022 return entry;
2023}
2024
2025/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002026 * Initialize the perf_event context in a task_struct:
2027 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02002028static void __perf_event_init_context(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002029{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002030 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002031 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002032 INIT_LIST_HEAD(&ctx->pinned_groups);
2033 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002034 INIT_LIST_HEAD(&ctx->event_list);
2035 atomic_set(&ctx->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002036}
2037
Peter Zijlstraeb184472010-09-07 15:55:13 +02002038static struct perf_event_context *
2039alloc_perf_context(struct pmu *pmu, struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002040{
2041 struct perf_event_context *ctx;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002042
2043 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2044 if (!ctx)
2045 return NULL;
2046
2047 __perf_event_init_context(ctx);
2048 if (task) {
2049 ctx->task = task;
2050 get_task_struct(task);
2051 }
2052 ctx->pmu = pmu;
2053
2054 return ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002055}
2056
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002057static struct task_struct *
2058find_lively_task_by_vpid(pid_t vpid)
2059{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002060 struct task_struct *task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061 int err;
2062
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002063 rcu_read_lock();
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002064 if (!vpid)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002065 task = current;
2066 else
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002067 task = find_task_by_vpid(vpid);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002068 if (task)
2069 get_task_struct(task);
2070 rcu_read_unlock();
2071
2072 if (!task)
2073 return ERR_PTR(-ESRCH);
2074
2075 /*
2076 * Can't attach events to a dying task.
2077 */
2078 err = -ESRCH;
2079 if (task->flags & PF_EXITING)
2080 goto errout;
2081
2082 /* Reuse ptrace permission checks for now. */
2083 err = -EACCES;
2084 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2085 goto errout;
2086
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002087 return task;
2088errout:
2089 put_task_struct(task);
2090 return ERR_PTR(err);
2091
2092}
2093
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002094static struct perf_event_context *
Matt Helsley38a81da2010-09-13 13:01:20 -07002095find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002096{
2097 struct perf_event_context *ctx;
2098 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002099 unsigned long flags;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002100 int ctxn, err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002101
Matt Helsley38a81da2010-09-13 13:01:20 -07002102 if (!task && cpu != -1) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002103 /* Must be root to operate on a CPU event: */
2104 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2105 return ERR_PTR(-EACCES);
2106
2107 if (cpu < 0 || cpu >= nr_cpumask_bits)
2108 return ERR_PTR(-EINVAL);
2109
2110 /*
2111 * We could be clever and allow attaching an event to an
2112 * offline CPU and activate it when the CPU comes up, but
2113 * that's for later.
2114 */
2115 if (!cpu_online(cpu))
2116 return ERR_PTR(-ENODEV);
2117
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002118 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002119 ctx = &cpuctx->ctx;
2120 get_ctx(ctx);
2121
2122 return ctx;
2123 }
2124
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002125 err = -EINVAL;
2126 ctxn = pmu->task_ctx_nr;
2127 if (ctxn < 0)
2128 goto errout;
2129
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002130retry:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002131 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002132 if (ctx) {
2133 unclone_ctx(ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002134 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002135 }
2136
2137 if (!ctx) {
Peter Zijlstraeb184472010-09-07 15:55:13 +02002138 ctx = alloc_perf_context(pmu, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002139 err = -ENOMEM;
2140 if (!ctx)
2141 goto errout;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002142
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002143 get_ctx(ctx);
Peter Zijlstraeb184472010-09-07 15:55:13 +02002144
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002145 if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002146 /*
2147 * We raced with some other task; use
2148 * the context they set.
2149 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02002150 put_task_struct(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002151 kfree(ctx);
2152 goto retry;
2153 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002154 }
2155
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002156 return ctx;
2157
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002158errout:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002159 return ERR_PTR(err);
2160}
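/*
 * Illustrative sketch, not part of this file: the (task, cpu) pair handed
 * to find_get_context() comes straight from the perf_event_open() pid/cpu
 * arguments, which user space combines like this:
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_counter(struct perf_event_attr *attr, pid_t pid, int cpu)
{
        /*
         * pid ==  0, cpu == -1: this task, on any CPU (task context)
         * pid ==  0, cpu >=  0: this task, only while on that CPU
         * pid == -1, cpu >=  0: that CPU, any task; needs CAP_SYS_ADMIN
         *                       with the default paranoid setting
         */
        return syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0);
}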
2161
Li Zefan6fb29152009-10-15 11:21:42 +08002162static void perf_event_free_filter(struct perf_event *event);
2163
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002164static void free_event_rcu(struct rcu_head *head)
2165{
2166 struct perf_event *event;
2167
2168 event = container_of(head, struct perf_event, rcu_head);
2169 if (event->ns)
2170 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08002171 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002172 kfree(event);
2173}
2174
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002175static void perf_buffer_put(struct perf_buffer *buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002176
2177static void free_event(struct perf_event *event)
2178{
Peter Zijlstrae360adb2010-10-14 14:01:34 +08002179 irq_work_sync(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002180
2181 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02002182 if (event->attach_state & PERF_ATTACH_TASK)
2183 jump_label_dec(&perf_task_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01002184 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002185 atomic_dec(&nr_mmap_events);
2186 if (event->attr.comm)
2187 atomic_dec(&nr_comm_events);
2188 if (event->attr.task)
2189 atomic_dec(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002190 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2191 put_callchain_buffers();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002192 }
2193
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002194 if (event->buffer) {
2195 perf_buffer_put(event->buffer);
2196 event->buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002197 }
2198
2199 if (event->destroy)
2200 event->destroy(event);
2201
Peter Zijlstra0c67b402010-09-13 11:15:58 +02002202 if (event->ctx)
2203 put_ctx(event->ctx);
2204
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002205 call_rcu(&event->rcu_head, free_event_rcu);
2206}
2207
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002208int perf_event_release_kernel(struct perf_event *event)
2209{
2210 struct perf_event_context *ctx = event->ctx;
2211
Peter Zijlstra050735b2010-05-11 11:51:53 +02002212 /*
2213 * Remove it from the PMU; it can't get re-enabled since we got
2214 * here because the last reference went away.
2215 */
2216 perf_event_disable(event);
2217
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002218 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa0507c82010-05-06 15:42:53 +02002219 /*
2220 * There are two ways this annotation is useful:
2221 *
2222 * 1) there is a lock recursion from perf_event_exit_task;
2223 * see the comment there.
2224 *
2225 * 2) there is a lock-inversion with mmap_sem through
2226 * perf_event_read_group(), which takes faults while
2227 * holding ctx->mutex, however this is called after
2228 * the last filedesc died, so there is no possibility
2229 * to trigger the AB-BA case.
2230 */
2231 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002232 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002233 perf_group_detach(event);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002234 list_del_event(event, ctx);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002235 raw_spin_unlock_irq(&ctx->lock);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002236 mutex_unlock(&ctx->mutex);
2237
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002238 free_event(event);
2239
2240 return 0;
2241}
2242EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2243
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002244/*
2245 * Called when the last reference to the file is gone.
2246 */
2247static int perf_release(struct inode *inode, struct file *file)
2248{
2249 struct perf_event *event = file->private_data;
Peter Zijlstra88821352010-11-09 19:01:43 +01002250 struct task_struct *owner;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002251
2252 file->private_data = NULL;
2253
Peter Zijlstra88821352010-11-09 19:01:43 +01002254 rcu_read_lock();
2255 owner = ACCESS_ONCE(event->owner);
2256 /*
2257 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2258 * !owner it means the list deletion is complete and we can indeed
2259 * free this event, otherwise we need to serialize on
2260 * owner->perf_event_mutex.
2261 */
2262 smp_read_barrier_depends();
2263 if (owner) {
2264 /*
2265 * Since delayed_put_task_struct() also drops the last
2266 * task reference we can safely take a new reference
2267 * while holding the rcu_read_lock().
2268 */
2269 get_task_struct(owner);
2270 }
2271 rcu_read_unlock();
2272
2273 if (owner) {
2274 mutex_lock(&owner->perf_event_mutex);
2275 /*
2276 * We have to re-check the event->owner field; if it is cleared
2277 * we raced with perf_event_exit_task(). Acquiring the mutex
2278 * ensured they're done, and we can proceed with freeing the
2279 * event.
2280 */
2281 if (event->owner)
2282 list_del_init(&event->owner_entry);
2283 mutex_unlock(&owner->perf_event_mutex);
2284 put_task_struct(owner);
2285 }
2286
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002287 return perf_event_release_kernel(event);
2288}
2289
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002290static int perf_event_read_size(struct perf_event *event)
2291{
2292 int entry = sizeof(u64); /* value */
2293 int size = 0;
2294 int nr = 1;
2295
2296 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2297 size += sizeof(u64);
2298
2299 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2300 size += sizeof(u64);
2301
2302 if (event->attr.read_format & PERF_FORMAT_ID)
2303 entry += sizeof(u64);
2304
2305 if (event->attr.read_format & PERF_FORMAT_GROUP) {
2306 nr += event->group_leader->nr_siblings;
2307 size += sizeof(u64);
2308 }
2309
2310 size += entry * nr;
2311
2312 return size;
2313}
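/*
 * Illustrative sketch, not part of this file: the same sizing redone in
 * user space so a reader can size its buffer before read(2).  For a group
 * leader with two siblings and read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID this gives 8 + 3 * 16 = 56 bytes: nr, then a
 * {value, id} pair per event.
 */
#include <stddef.h>
#include <stdint.h>
#include <linux/perf_event.h>

static size_t expected_read_size(uint64_t read_format, int nr_siblings)
{
        size_t entry = sizeof(uint64_t);        /* value */
        size_t size = 0;
        int nr = 1;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_ID)
                entry += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_GROUP) {
                nr += nr_siblings;
                size += sizeof(uint64_t);       /* the leading 'nr' word */
        }
        return size + entry * nr;
}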
2314
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002315u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002316{
2317 struct perf_event *child;
2318 u64 total = 0;
2319
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002320 *enabled = 0;
2321 *running = 0;
2322
Peter Zijlstra6f105812009-11-20 22:19:56 +01002323 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002324 total += perf_event_read(event);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002325 *enabled += event->total_time_enabled +
2326 atomic64_read(&event->child_total_time_enabled);
2327 *running += event->total_time_running +
2328 atomic64_read(&event->child_total_time_running);
2329
2330 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002331 total += perf_event_read(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002332 *enabled += child->total_time_enabled;
2333 *running += child->total_time_running;
2334 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002335 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002336
2337 return total;
2338}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002339EXPORT_SYMBOL_GPL(perf_event_read_value);
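/*
 * Illustrative sketch, not part of this file: the enabled/running pair
 * filled in above is what lets user space undo counter multiplexing.  If
 * the event was on the PMU for only part of the time it was enabled,
 * scale the raw count up proportionally:
 */
#include <stdint.h>

static uint64_t scaled_count(uint64_t count, uint64_t enabled, uint64_t running)
{
        if (!running)
                return 0;       /* the event never got onto the PMU */

        return (uint64_t)((double)count * enabled / running);
}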
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002340
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002341static int perf_event_read_group(struct perf_event *event,
2342 u64 read_format, char __user *buf)
2343{
2344 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01002345 int n = 0, size = 0, ret = -EFAULT;
2346 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002347 u64 values[5];
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002348 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002349
Peter Zijlstra6f105812009-11-20 22:19:56 +01002350 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002351 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002352
2353 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002354 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2355 values[n++] = enabled;
2356 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2357 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002358 values[n++] = count;
2359 if (read_format & PERF_FORMAT_ID)
2360 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002361
2362 size = n * sizeof(u64);
2363
2364 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01002365 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002366
Peter Zijlstra6f105812009-11-20 22:19:56 +01002367 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002368
2369 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01002370 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002371
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002372 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01002373 if (read_format & PERF_FORMAT_ID)
2374 values[n++] = primary_event_id(sub);
2375
2376 size = n * sizeof(u64);
2377
Stephane Eranian184d3da2009-11-23 21:40:49 -08002378 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01002379 ret = -EFAULT;
2380 goto unlock;
2381 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01002382
2383 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002384 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002385unlock:
2386 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002387
Peter Zijlstraabf48682009-11-20 22:19:49 +01002388 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002389}
2390
2391static int perf_event_read_one(struct perf_event *event,
2392 u64 read_format, char __user *buf)
2393{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002394 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002395 u64 values[4];
2396 int n = 0;
2397
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002398 values[n++] = perf_event_read_value(event, &enabled, &running);
2399 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2400 values[n++] = enabled;
2401 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2402 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002403 if (read_format & PERF_FORMAT_ID)
2404 values[n++] = primary_event_id(event);
2405
2406 if (copy_to_user(buf, values, n * sizeof(u64)))
2407 return -EFAULT;
2408
2409 return n * sizeof(u64);
2410}
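/*
 * Illustrative sketch, not part of this file: the values[] array built
 * above reaches user space in exactly this order, so a matching
 * non-group read can be done into a plain struct:
 */
#include <stdint.h>
#include <unistd.h>

struct read_one {       /* read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                         * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID */
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
        uint64_t id;
};

static int read_counter(int fd, struct read_one *r)
{
        return read(fd, r, sizeof(*r)) == (ssize_t)sizeof(*r) ? 0 : -1;
}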
2411
2412/*
2413 * Read the performance event - simple non-blocking version for now
2414 */
2415static ssize_t
2416perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2417{
2418 u64 read_format = event->attr.read_format;
2419 int ret;
2420
2421 /*
2422 * Return end-of-file for a read on an event that is in
2423 * error state (i.e. because it was pinned but it couldn't be
2424 * scheduled onto the CPU at some point).
2425 */
2426 if (event->state == PERF_EVENT_STATE_ERROR)
2427 return 0;
2428
2429 if (count < perf_event_read_size(event))
2430 return -ENOSPC;
2431
2432 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002433 if (read_format & PERF_FORMAT_GROUP)
2434 ret = perf_event_read_group(event, read_format, buf);
2435 else
2436 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002437
2438 return ret;
2439}
2440
2441static ssize_t
2442perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2443{
2444 struct perf_event *event = file->private_data;
2445
2446 return perf_read_hw(event, buf, count);
2447}
2448
2449static unsigned int perf_poll(struct file *file, poll_table *wait)
2450{
2451 struct perf_event *event = file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002452 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002453	unsigned int events = POLLHUP; /* poll(2) mask; POLL_HUP is a siginfo code */
2454
2455 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002456 buffer = rcu_dereference(event->buffer);
2457 if (buffer)
2458 events = atomic_xchg(&buffer->poll, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002459 rcu_read_unlock();
2460
2461 poll_wait(file, &event->waitq, wait);
2462
2463 return events;
2464}
2465
2466static void perf_event_reset(struct perf_event *event)
2467{
2468 (void)perf_event_read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02002469 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002470 perf_event_update_userpage(event);
2471}
2472
2473/*
2474 * Holding the top-level event's child_mutex means that any
2475 * descendant process that has inherited this event will block
2476 * in sync_child_event if it goes to exit, thus satisfying the
2477 * task existence requirements of perf_event_enable/disable.
2478 */
2479static void perf_event_for_each_child(struct perf_event *event,
2480 void (*func)(struct perf_event *))
2481{
2482 struct perf_event *child;
2483
2484 WARN_ON_ONCE(event->ctx->parent_ctx);
2485 mutex_lock(&event->child_mutex);
2486 func(event);
2487 list_for_each_entry(child, &event->child_list, child_list)
2488 func(child);
2489 mutex_unlock(&event->child_mutex);
2490}
2491
2492static void perf_event_for_each(struct perf_event *event,
2493 void (*func)(struct perf_event *))
2494{
2495 struct perf_event_context *ctx = event->ctx;
2496 struct perf_event *sibling;
2497
2498 WARN_ON_ONCE(ctx->parent_ctx);
2499 mutex_lock(&ctx->mutex);
2500 event = event->group_leader;
2501
2502 perf_event_for_each_child(event, func);
2503 func(event);
2504 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2505 perf_event_for_each_child(event, func);
2506 mutex_unlock(&ctx->mutex);
2507}
2508
2509static int perf_event_period(struct perf_event *event, u64 __user *arg)
2510{
2511 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002512 int ret = 0;
2513 u64 value;
2514
2515 if (!event->attr.sample_period)
2516 return -EINVAL;
2517
John Blackwoodad0cf342010-09-28 18:03:11 -04002518 if (copy_from_user(&value, arg, sizeof(value)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002519 return -EFAULT;
2520
2521 if (!value)
2522 return -EINVAL;
2523
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002524 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002525 if (event->attr.freq) {
2526 if (value > sysctl_perf_event_sample_rate) {
2527 ret = -EINVAL;
2528 goto unlock;
2529 }
2530
2531 event->attr.sample_freq = value;
2532 } else {
2533 event->attr.sample_period = value;
2534 event->hw.sample_period = value;
2535 }
2536unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002537 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002538
2539 return ret;
2540}
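/*
 * Illustrative sketch, not part of this file: changing the period from
 * user space.  The argument is passed by pointer, matching the
 * copy_from_user() above.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_sample_period(int fd, uint64_t period)
{
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}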
2541
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002542static const struct file_operations perf_fops;
2543
2544static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2545{
2546 struct file *file;
2547
2548 file = fget_light(fd, fput_needed);
2549 if (!file)
2550 return ERR_PTR(-EBADF);
2551
2552 if (file->f_op != &perf_fops) {
2553 fput_light(file, *fput_needed);
2554 *fput_needed = 0;
2555 return ERR_PTR(-EBADF);
2556 }
2557
2558 return file->private_data;
2559}
2560
2561static int perf_event_set_output(struct perf_event *event,
2562 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08002563static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002564
2565static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2566{
2567 struct perf_event *event = file->private_data;
2568 void (*func)(struct perf_event *);
2569 u32 flags = arg;
2570
2571 switch (cmd) {
2572 case PERF_EVENT_IOC_ENABLE:
2573 func = perf_event_enable;
2574 break;
2575 case PERF_EVENT_IOC_DISABLE:
2576 func = perf_event_disable;
2577 break;
2578 case PERF_EVENT_IOC_RESET:
2579 func = perf_event_reset;
2580 break;
2581
2582 case PERF_EVENT_IOC_REFRESH:
2583 return perf_event_refresh(event, arg);
2584
2585 case PERF_EVENT_IOC_PERIOD:
2586 return perf_event_period(event, (u64 __user *)arg);
2587
2588 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002589 {
2590 struct perf_event *output_event = NULL;
2591 int fput_needed = 0;
2592 int ret;
2593
2594 if (arg != -1) {
2595 output_event = perf_fget_light(arg, &fput_needed);
2596 if (IS_ERR(output_event))
2597 return PTR_ERR(output_event);
2598 }
2599
2600 ret = perf_event_set_output(event, output_event);
2601 if (output_event)
2602 fput_light(output_event->filp, fput_needed);
2603
2604 return ret;
2605 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002606
Li Zefan6fb29152009-10-15 11:21:42 +08002607 case PERF_EVENT_IOC_SET_FILTER:
2608 return perf_event_set_filter(event, (void __user *)arg);
2609
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002610 default:
2611 return -ENOTTY;
2612 }
2613
2614 if (flags & PERF_IOC_FLAG_GROUP)
2615 perf_event_for_each(event, func);
2616 else
2617 perf_event_for_each_child(event, func);
2618
2619 return 0;
2620}
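/*
 * Illustrative sketch, not part of this file: the flags decoding above
 * means enable/disable can act on a whole group through its leader in a
 * single call:
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int enable_group(int group_leader_fd)
{
        return ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE,
                     PERF_IOC_FLAG_GROUP);
}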
2621
2622int perf_event_task_enable(void)
2623{
2624 struct perf_event *event;
2625
2626 mutex_lock(&current->perf_event_mutex);
2627 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2628 perf_event_for_each_child(event, perf_event_enable);
2629 mutex_unlock(&current->perf_event_mutex);
2630
2631 return 0;
2632}
2633
2634int perf_event_task_disable(void)
2635{
2636 struct perf_event *event;
2637
2638 mutex_lock(&current->perf_event_mutex);
2639 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2640 perf_event_for_each_child(event, perf_event_disable);
2641 mutex_unlock(&current->perf_event_mutex);
2642
2643 return 0;
2644}
2645
2646#ifndef PERF_EVENT_INDEX_OFFSET
2647# define PERF_EVENT_INDEX_OFFSET 0
2648#endif
2649
2650static int perf_event_index(struct perf_event *event)
2651{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002652 if (event->hw.state & PERF_HES_STOPPED)
2653 return 0;
2654
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002655 if (event->state != PERF_EVENT_STATE_ACTIVE)
2656 return 0;
2657
2658 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2659}
2660
2661/*
2662 * Callers need to ensure there can be no nesting of this function, otherwise
2663 * the seqlock logic goes bad. We cannot serialize this because the arch
2664 * code calls this from NMI context.
2665 */
2666void perf_event_update_userpage(struct perf_event *event)
2667{
2668 struct perf_event_mmap_page *userpg;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002669 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002670
2671 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002672 buffer = rcu_dereference(event->buffer);
2673 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002674 goto unlock;
2675
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002676 userpg = buffer->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002677
2678 /*
2679 * Disable preemption so that the corresponding user-space reader
2680 * does not spin too long if we get preempted.
2681 */
2682 preempt_disable();
2683 ++userpg->lock;
2684 barrier();
2685 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002686 userpg->offset = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002687 if (event->state == PERF_EVENT_STATE_ACTIVE)
Peter Zijlstrae7850592010-05-21 14:43:08 +02002688 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002689
2690 userpg->time_enabled = event->total_time_enabled +
2691 atomic64_read(&event->child_total_time_enabled);
2692
2693 userpg->time_running = event->total_time_running +
2694 atomic64_read(&event->child_total_time_running);
2695
2696 barrier();
2697 ++userpg->lock;
2698 preempt_enable();
2699unlock:
2700 rcu_read_unlock();
2701}
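/*
 * Illustrative sketch, not part of this file: the ->lock ticking above is
 * a sequence count (odd while an update is in flight), so a
 * self-monitoring read of the mmap'ed page pairs with it like this.  The
 * hardware-counter read selected by pg->index is elided.
 */
#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t read_offset(volatile struct perf_event_mmap_page *pg)
{
        uint32_t seq;
        uint64_t offset;

        do {
                seq = pg->lock;
                __sync_synchronize();   /* pairs with the barrier()s above */
                offset = pg->offset;
                __sync_synchronize();
        } while (pg->lock != seq || (seq & 1));

        return offset;
}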
2702
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002703static unsigned long perf_data_size(struct perf_buffer *buffer);
2704
2705static void
2706perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2707{
2708 long max_size = perf_data_size(buffer);
2709
2710 if (watermark)
2711 buffer->watermark = min(max_size, watermark);
2712
2713 if (!buffer->watermark)
2714 buffer->watermark = max_size / 2;
2715
2716 if (flags & PERF_BUFFER_WRITABLE)
2717 buffer->writable = 1;
2718
2719 atomic_set(&buffer->refcount, 1);
2720}
2721
Peter Zijlstra906010b2009-09-21 16:08:49 +02002722#ifndef CONFIG_PERF_USE_VMALLOC
2723
2724/*
2725 * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
2726 */
2727
2728static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002729perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002730{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002731 if (pgoff > buffer->nr_pages)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002732 return NULL;
2733
2734 if (pgoff == 0)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002735 return virt_to_page(buffer->user_page);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002736
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002737 return virt_to_page(buffer->data_pages[pgoff - 1]);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002738}
2739
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002740static void *perf_mmap_alloc_page(int cpu)
2741{
2742 struct page *page;
2743 int node;
2744
2745 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2746 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2747 if (!page)
2748 return NULL;
2749
2750 return page_address(page);
2751}
2752
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002753static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002754perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002755{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002756 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002757 unsigned long size;
2758 int i;
2759
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002760 size = sizeof(struct perf_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002761 size += nr_pages * sizeof(void *);
2762
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002763 buffer = kzalloc(size, GFP_KERNEL);
2764 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002765 goto fail;
2766
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002767 buffer->user_page = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002768 if (!buffer->user_page)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002769 goto fail_user_page;
2770
2771 for (i = 0; i < nr_pages; i++) {
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002772 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002773 if (!buffer->data_pages[i])
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002774 goto fail_data_pages;
2775 }
2776
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002777 buffer->nr_pages = nr_pages;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002778
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002779 perf_buffer_init(buffer, watermark, flags);
2780
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002781 return buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002782
2783fail_data_pages:
2784 for (i--; i >= 0; i--)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002785 free_page((unsigned long)buffer->data_pages[i]);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002786
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002787 free_page((unsigned long)buffer->user_page);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002788
2789fail_user_page:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002790 kfree(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002791
2792fail:
Peter Zijlstra906010b2009-09-21 16:08:49 +02002793 return NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002794}
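/*
 * Illustrative sketch, not part of this file: nr_pages above comes from
 * the length user space passed to mmap(), which must cover one metadata
 * page plus a power-of-two number of data pages:
 */
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

static void *map_ring(int fd, int data_pages /* must be a power of two */)
{
        size_t len = (size_t)(data_pages + 1) * sysconf(_SC_PAGESIZE);

        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}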
2795
2796static void perf_mmap_free_page(unsigned long addr)
2797{
2798 struct page *page = virt_to_page((void *)addr);
2799
2800 page->mapping = NULL;
2801 __free_page(page);
2802}
2803
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002804static void perf_buffer_free(struct perf_buffer *buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002805{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002806 int i;
2807
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002808 perf_mmap_free_page((unsigned long)buffer->user_page);
2809 for (i = 0; i < buffer->nr_pages; i++)
2810 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2811 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002812}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002813
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002814static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002815{
2816 return 0;
2817}
2818
Peter Zijlstra906010b2009-09-21 16:08:49 +02002819#else
2820
2821/*
2822 * Back perf_mmap() with vmalloc memory.
2823 *
2824 * Required for architectures that have d-cache aliasing issues.
2825 */
2826
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002827static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002828{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002829 return buffer->page_order;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002830}
2831
Peter Zijlstra906010b2009-09-21 16:08:49 +02002832static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002833perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002834{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002835 if (pgoff > (1UL << page_order(buffer)))
Peter Zijlstra906010b2009-09-21 16:08:49 +02002836 return NULL;
2837
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002838 return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002839}
2840
2841static void perf_mmap_unmark_page(void *addr)
2842{
2843 struct page *page = vmalloc_to_page(addr);
2844
2845 page->mapping = NULL;
2846}
2847
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002848static void perf_buffer_free_work(struct work_struct *work)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002849{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002850 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002851 void *base;
2852 int i, nr;
2853
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002854 buffer = container_of(work, struct perf_buffer, work);
2855 nr = 1 << page_order(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002856
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002857 base = buffer->user_page;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002858 for (i = 0; i < nr + 1; i++)
2859 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2860
2861 vfree(base);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002862 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002863}
2864
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002865static void perf_buffer_free(struct perf_buffer *buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002866{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002867 schedule_work(&buffer->work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002868}
2869
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002870static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002871perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002872{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002873 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002874 unsigned long size;
2875 void *all_buf;
2876
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002877 size = sizeof(struct perf_buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002878 size += sizeof(void *);
2879
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002880 buffer = kzalloc(size, GFP_KERNEL);
2881 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002882 goto fail;
2883
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002884 INIT_WORK(&buffer->work, perf_buffer_free_work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002885
2886 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2887 if (!all_buf)
2888 goto fail_all_buf;
2889
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002890 buffer->user_page = all_buf;
2891 buffer->data_pages[0] = all_buf + PAGE_SIZE;
2892 buffer->page_order = ilog2(nr_pages);
2893 buffer->nr_pages = 1;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002894
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002895 perf_buffer_init(buffer, watermark, flags);
2896
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002897 return buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002898
2899fail_all_buf:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002900 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002901
2902fail:
2903 return NULL;
2904}
2905
2906#endif
2907
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002908static unsigned long perf_data_size(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002909{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002910 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002911}
2912
Peter Zijlstra906010b2009-09-21 16:08:49 +02002913static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2914{
2915 struct perf_event *event = vma->vm_file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002916 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002917 int ret = VM_FAULT_SIGBUS;
2918
2919 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2920 if (vmf->pgoff == 0)
2921 ret = 0;
2922 return ret;
2923 }
2924
2925 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002926 buffer = rcu_dereference(event->buffer);
2927 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002928 goto unlock;
2929
2930 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2931 goto unlock;
2932
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002933 vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002934 if (!vmf->page)
2935 goto unlock;
2936
2937 get_page(vmf->page);
2938 vmf->page->mapping = vma->vm_file->f_mapping;
2939 vmf->page->index = vmf->pgoff;
2940
2941 ret = 0;
2942unlock:
2943 rcu_read_unlock();
2944
2945 return ret;
2946}
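/*
 * perf_mmap_to_page() is defined earlier in this file (not shown
 * here); for the vmalloc-backed layout it reduces to a bounds check
 * plus vmalloc_to_page().  A sketch assuming the single-area layout
 * from perf_buffer_alloc() above (illustrative, not the authoritative
 * definition):
 */
#if 0
static struct page *
sketch_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(buffer)))
		return NULL;		/* past the user page + data area */

	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
}
#endif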
2947
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002948static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002949{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002950 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002951
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002952 buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
2953 perf_buffer_free(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002954}
2955
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002956static struct perf_buffer *perf_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002957{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002958 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002959
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002960 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002961 buffer = rcu_dereference(event->buffer);
2962 if (buffer) {
2963 if (!atomic_inc_not_zero(&buffer->refcount))
2964 buffer = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002965 }
2966 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002967
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002968 return buffer;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002969}
2970
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002971static void perf_buffer_put(struct perf_buffer *buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002972{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002973 if (!atomic_dec_and_test(&buffer->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002974 return;
2975
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002976 call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002977}
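/*
 * perf_buffer_get()/perf_buffer_put() form the classic RCU + refcount
 * idiom: atomic_inc_not_zero() under rcu_read_lock() ensures a buffer
 * whose count already dropped to zero is never resurrected, and the
 * final put defers the actual free until all RCU readers are done.
 * A minimal usage sketch (hypothetical caller, for illustration):
 */
#if 0
static void sketch_buffer_user(struct perf_event *event)
{
	struct perf_buffer *buffer = perf_buffer_get(event);

	if (!buffer)
		return;			/* never allocated, or being torn down */

	/* ... buffer may be dereferenced safely here ... */

	perf_buffer_put(buffer);
}
#endif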
2978
2979static void perf_mmap_open(struct vm_area_struct *vma)
2980{
2981 struct perf_event *event = vma->vm_file->private_data;
2982
2983 atomic_inc(&event->mmap_count);
2984}
2985
2986static void perf_mmap_close(struct vm_area_struct *vma)
2987{
2988 struct perf_event *event = vma->vm_file->private_data;
2989
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002990 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002991 unsigned long size = perf_data_size(event->buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002992 struct user_struct *user = event->mmap_user;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002993 struct perf_buffer *buffer = event->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002994
Peter Zijlstra906010b2009-09-21 16:08:49 +02002995 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002996 vma->vm_mm->locked_vm -= event->mmap_locked;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002997 rcu_assign_pointer(event->buffer, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002998 mutex_unlock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002999
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003000 perf_buffer_put(buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003001 free_uid(user);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003002 }
3003}
3004
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003005static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003006 .open = perf_mmap_open,
3007 .close = perf_mmap_close,
3008 .fault = perf_mmap_fault,
3009 .page_mkwrite = perf_mmap_fault,
3010};
3011
3012static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3013{
3014 struct perf_event *event = file->private_data;
3015 unsigned long user_locked, user_lock_limit;
3016 struct user_struct *user = current_user();
3017 unsigned long locked, lock_limit;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003018 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003019 unsigned long vma_size;
3020 unsigned long nr_pages;
3021 long user_extra, extra;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003022 int ret = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003023
Peter Zijlstrac7920612010-05-18 10:33:24 +02003024 /*
3025 * Don't allow mmap() of inherited per-task counters. This would
3026 * create a performance issue due to all children writing to the
3027 * same buffer.
3028 */
3029 if (event->cpu == -1 && event->attr.inherit)
3030 return -EINVAL;
3031
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003032 if (!(vma->vm_flags & VM_SHARED))
3033 return -EINVAL;
3034
3035 vma_size = vma->vm_end - vma->vm_start;
3036 nr_pages = (vma_size / PAGE_SIZE) - 1;
3037
3038 /*
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003039	 * If we have buffer pages, ensure their count is a power of two so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003040	 * can use bitmasks instead of modulo.
3041 */
3042 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3043 return -EINVAL;
3044
3045 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3046 return -EINVAL;
3047
3048 if (vma->vm_pgoff != 0)
3049 return -EINVAL;
3050
3051 WARN_ON_ONCE(event->ctx->parent_ctx);
3052 mutex_lock(&event->mmap_mutex);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003053 if (event->buffer) {
3054 if (event->buffer->nr_pages == nr_pages)
3055 atomic_inc(&event->buffer->refcount);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003056 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003057 ret = -EINVAL;
3058 goto unlock;
3059 }
3060
3061 user_extra = nr_pages + 1;
3062 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3063
3064 /*
3065 * Increase the limit linearly with more CPUs:
3066 */
3067 user_lock_limit *= num_online_cpus();
3068
3069 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3070
3071 extra = 0;
3072 if (user_locked > user_lock_limit)
3073 extra = user_locked - user_lock_limit;
3074
Jiri Slaby78d7d402010-03-05 13:42:54 -08003075 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003076 lock_limit >>= PAGE_SHIFT;
3077 locked = vma->vm_mm->locked_vm + extra;
3078
3079 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3080 !capable(CAP_IPC_LOCK)) {
3081 ret = -EPERM;
3082 goto unlock;
3083 }
3084
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003085 WARN_ON(event->buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003086
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003087 if (vma->vm_flags & VM_WRITE)
3088 flags |= PERF_BUFFER_WRITABLE;
3089
3090 buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
3091 event->cpu, flags);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003092 if (!buffer) {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003093 ret = -ENOMEM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003094 goto unlock;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003095 }
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003096 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003097
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003098 atomic_long_add(user_extra, &user->locked_vm);
3099 event->mmap_locked = extra;
3100 event->mmap_user = get_current_user();
3101 vma->vm_mm->locked_vm += event->mmap_locked;
3102
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003103unlock:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003104 if (!ret)
3105 atomic_inc(&event->mmap_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003106 mutex_unlock(&event->mmap_mutex);
3107
3108 vma->vm_flags |= VM_RESERVED;
3109 vma->vm_ops = &perf_mmap_vmops;
3110
3111 return ret;
3112}
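/*
 * Userspace view (editor's sketch, user-space code rather than kernel
 * code): the mapping must be MAP_SHARED, start at file offset 0 and
 * span 1 + 2^n pages -- the metadata page plus a power-of-two data
 * area -- or perf_mmap() above rejects it with -EINVAL.
 */
#if 0
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static void *map_event_buffer(struct perf_event_attr *attr, int *fdp)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
	void *base;

	if (fd < 0)
		return NULL;

	/* 1 metadata page + 8 data pages (8 is a power of two) */
	base = mmap(NULL, (1 + 8) * psz, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	*fdp = fd;
	return base == MAP_FAILED ? NULL : base;
}
#endif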
3113
3114static int perf_fasync(int fd, struct file *filp, int on)
3115{
3116 struct inode *inode = filp->f_path.dentry->d_inode;
3117 struct perf_event *event = filp->private_data;
3118 int retval;
3119
3120 mutex_lock(&inode->i_mutex);
3121 retval = fasync_helper(fd, filp, on, &event->fasync);
3122 mutex_unlock(&inode->i_mutex);
3123
3124 if (retval < 0)
3125 return retval;
3126
3127 return 0;
3128}
3129
3130static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01003131 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003132 .release = perf_release,
3133 .read = perf_read,
3134 .poll = perf_poll,
3135 .unlocked_ioctl = perf_ioctl,
3136 .compat_ioctl = perf_ioctl,
3137 .mmap = perf_mmap,
3138 .fasync = perf_fasync,
3139};
3140
3141/*
3142 * Perf event wakeup
3143 *
3144 * If there's data, ensure we set the poll() state and publish everything
3145 * to user-space before waking everybody up.
3146 */
3147
3148void perf_event_wakeup(struct perf_event *event)
3149{
3150 wake_up_all(&event->waitq);
3151
3152 if (event->pending_kill) {
3153 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3154 event->pending_kill = 0;
3155 }
3156}
3157
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003158static void perf_pending_event(struct irq_work *entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003159{
3160 struct perf_event *event = container_of(entry,
3161 struct perf_event, pending);
3162
3163 if (event->pending_disable) {
3164 event->pending_disable = 0;
3165 __perf_event_disable(event);
3166 }
3167
3168 if (event->pending_wakeup) {
3169 event->pending_wakeup = 0;
3170 perf_event_wakeup(event);
3171 }
3172}
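/*
 * perf_pending_event() runs via the irq_work machinery: work that must
 * not be done from NMI context (disabling an event, waking waiters) is
 * queued from the NMI and executed shortly afterwards from a safe IRQ
 * context.  The generic pattern, as a sketch (hypothetical names):
 */
#if 0
#include <linux/irq_work.h>

static void sketch_work_func(struct irq_work *entry)
{
	/* runs from IRQ context, soon after being queued */
}

static struct irq_work sketch_work;

static void sketch_init(void)
{
	init_irq_work(&sketch_work, sketch_work_func);
}

static void sketch_from_nmi(void)
{
	irq_work_queue(&sketch_work);	/* safe even from NMI context */
}
#endif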
3173
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003174/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08003175 * We assume that only KVM supports these callbacks.
3176 * If another virtualization implementation ever needs them,
3177 * this can be turned into a list.
3178 */
3179struct perf_guest_info_callbacks *perf_guest_cbs;
3180
3181int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3182{
3183 perf_guest_cbs = cbs;
3184 return 0;
3185}
3186EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3187
3188int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3189{
3190 perf_guest_cbs = NULL;
3191 return 0;
3192}
3193EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3194
3195/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003196 * Output
3197 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003198static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003199 unsigned long offset, unsigned long head)
3200{
3201 unsigned long mask;
3202
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003203 if (!buffer->writable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003204 return true;
3205
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003206 mask = perf_data_size(buffer) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003207
3208 offset = (offset - tail) & mask;
3209 head = (head - tail) & mask;
3210
3211 if ((int)(head - offset) < 0)
3212 return false;
3213
3214 return true;
3215}
3216
3217static void perf_output_wakeup(struct perf_output_handle *handle)
3218{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003219 atomic_set(&handle->buffer->poll, POLL_IN);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003220
3221 if (handle->nmi) {
3222 handle->event->pending_wakeup = 1;
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003223 irq_work_queue(&handle->event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003224 } else
3225 perf_event_wakeup(handle->event);
3226}
3227
3228/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003229 * We need to ensure that a later event doesn't publish a head while a
Peter Zijlstraef607772010-05-18 10:50:41 +02003230 * former event is still writing. However, since we need to deal with
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003231 * NMIs, we cannot fully serialize things.
3232 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003233 * We only publish the head (and generate a wakeup) when the outer-most
Peter Zijlstraef607772010-05-18 10:50:41 +02003234 * event completes.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003235 */
Peter Zijlstraef607772010-05-18 10:50:41 +02003236static void perf_output_get_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003237{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003238 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003239
Peter Zijlstraef607772010-05-18 10:50:41 +02003240 preempt_disable();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003241 local_inc(&buffer->nest);
3242 handle->wakeup = local_read(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003243}
3244
Peter Zijlstraef607772010-05-18 10:50:41 +02003245static void perf_output_put_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003246{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003247 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003248 unsigned long head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003249
3250again:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003251 head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003252
3253 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003254 * IRQ/NMI can happen here, which means we can miss a head update.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003255 */
3256
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003257 if (!local_dec_and_test(&buffer->nest))
Frederic Weisbeckeracd35a42010-05-20 21:28:34 +02003258 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003259
3260 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003261 * Publish the known good head. Rely on the full barrier implied
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003262	 * by local_dec_and_test() to order the buffer->head read and this
Peter Zijlstraef607772010-05-18 10:50:41 +02003263 * write.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003264 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003265 buffer->user_page->data_head = head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003266
Peter Zijlstraef607772010-05-18 10:50:41 +02003267 /*
3268 * Now check if we missed an update, rely on the (compiler)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003269	 * barrier in local_dec_and_test() to re-read buffer->head.
Peter Zijlstraef607772010-05-18 10:50:41 +02003270 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003271 if (unlikely(head != local_read(&buffer->head))) {
3272 local_inc(&buffer->nest);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003273 goto again;
3274 }
3275
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003276 if (handle->wakeup != local_read(&buffer->wakeup))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003277 perf_output_wakeup(handle);
Peter Zijlstraef607772010-05-18 10:50:41 +02003278
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003279out:
Peter Zijlstraef607772010-05-18 10:50:41 +02003280 preempt_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003281}
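/*
 * The matching user-space side of this protocol (editor's sketch, not
 * kernel code): read data_head, place a read barrier, consume records,
 * then publish data_tail so perf_output_space() can reuse the space.
 * The barrier shown is an x86 example; other architectures need their
 * own read barrier.
 */
#if 0
#include <linux/perf_event.h>

#define rmb()	__asm__ __volatile__("lfence" ::: "memory")

static void drain(volatile struct perf_event_mmap_page *up,
		  unsigned char *data, unsigned long mask)
{
	unsigned long head = up->data_head;
	unsigned long tail = up->data_tail;

	rmb();				/* read head before reading records */

	while (tail != head) {
		struct perf_event_header *hdr;

		hdr = (struct perf_event_header *)&data[tail & mask];
		/* ... consume the record at hdr (records may wrap) ... */
		tail += hdr->size;
	}

	up->data_tail = tail;		/* lets the kernel reuse the space */
}
#endif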
3282
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003283__always_inline void perf_output_copy(struct perf_output_handle *handle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003284 const void *buf, unsigned int len)
3285{
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003286 do {
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003287 unsigned long size = min_t(unsigned long, handle->size, len);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003288
3289 memcpy(handle->addr, buf, size);
3290
3291 len -= size;
3292 handle->addr += size;
Frederic Weisbecker74048f82010-05-27 21:34:58 +02003293 buf += size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003294 handle->size -= size;
3295 if (!handle->size) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003296 struct perf_buffer *buffer = handle->buffer;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003297
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003298 handle->page++;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003299 handle->page &= buffer->nr_pages - 1;
3300 handle->addr = buffer->data_pages[handle->page];
3301 handle->size = PAGE_SIZE << page_order(buffer);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003302 }
3303 } while (len);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003304}
3305
3306int perf_output_begin(struct perf_output_handle *handle,
3307 struct perf_event *event, unsigned int size,
3308 int nmi, int sample)
3309{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003310 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003311 unsigned long tail, offset, head;
3312 int have_lost;
3313 struct {
3314 struct perf_event_header header;
3315 u64 id;
3316 u64 lost;
3317 } lost_event;
3318
3319 rcu_read_lock();
3320 /*
3321 * For inherited events we send all the output towards the parent.
3322 */
3323 if (event->parent)
3324 event = event->parent;
3325
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003326 buffer = rcu_dereference(event->buffer);
3327 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003328 goto out;
3329
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003330 handle->buffer = buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003331 handle->event = event;
3332 handle->nmi = nmi;
3333 handle->sample = sample;
3334
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003335 if (!buffer->nr_pages)
Stephane Eranian00d1d0b2010-05-17 12:46:01 +02003336 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003337
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003338 have_lost = local_read(&buffer->lost);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003339 if (have_lost)
3340 size += sizeof(lost_event);
3341
Peter Zijlstraef607772010-05-18 10:50:41 +02003342 perf_output_get_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003343
3344 do {
3345 /*
3346		 * Userspace could choose to issue an mb() before updating the
3347		 * tail pointer, so that all reads are completed before the
3348		 * write is issued.
3349 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003350 tail = ACCESS_ONCE(buffer->user_page->data_tail);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003351 smp_rmb();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003352 offset = head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003353 head += size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003354 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003355 goto fail;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003356 } while (local_cmpxchg(&buffer->head, offset, head) != offset);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003357
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003358 if (head - local_read(&buffer->wakeup) > buffer->watermark)
3359 local_add(buffer->watermark, &buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003360
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003361 handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3362 handle->page &= buffer->nr_pages - 1;
3363 handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3364 handle->addr = buffer->data_pages[handle->page];
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003365 handle->addr += handle->size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003366 handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003367
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003368 if (have_lost) {
3369 lost_event.header.type = PERF_RECORD_LOST;
3370 lost_event.header.misc = 0;
3371 lost_event.header.size = sizeof(lost_event);
3372 lost_event.id = event->id;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003373 lost_event.lost = local_xchg(&buffer->lost, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003374
3375 perf_output_put(handle, lost_event);
3376 }
3377
3378 return 0;
3379
3380fail:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003381 local_inc(&buffer->lost);
Peter Zijlstraef607772010-05-18 10:50:41 +02003382 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003383out:
3384 rcu_read_unlock();
3385
3386 return -ENOSPC;
3387}
3388
3389void perf_output_end(struct perf_output_handle *handle)
3390{
3391 struct perf_event *event = handle->event;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003392 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003393
3394 int wakeup_events = event->attr.wakeup_events;
3395
3396 if (handle->sample && wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003397 int events = local_inc_return(&buffer->events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003398 if (events >= wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003399 local_sub(wakeup_events, &buffer->events);
3400 local_inc(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003401 }
3402 }
3403
Peter Zijlstraef607772010-05-18 10:50:41 +02003404 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003405 rcu_read_unlock();
3406}
3407
3408static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3409{
3410 /*
3411	 * only top-level events carry the pid namespace they were created in
3412 */
3413 if (event->parent)
3414 event = event->parent;
3415
3416 return task_tgid_nr_ns(p, event->ns);
3417}
3418
3419static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3420{
3421 /*
3422	 * only top-level events carry the pid namespace they were created in
3423 */
3424 if (event->parent)
3425 event = event->parent;
3426
3427 return task_pid_nr_ns(p, event->ns);
3428}
3429
3430static void perf_output_read_one(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003431 struct perf_event *event,
3432 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003433{
3434 u64 read_format = event->attr.read_format;
3435 u64 values[4];
3436 int n = 0;
3437
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003438 values[n++] = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003439 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003440 values[n++] = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003441 atomic64_read(&event->child_total_time_enabled);
3442 }
3443 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003444 values[n++] = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003445 atomic64_read(&event->child_total_time_running);
3446 }
3447 if (read_format & PERF_FORMAT_ID)
3448 values[n++] = primary_event_id(event);
3449
3450 perf_output_copy(handle, values, n * sizeof(u64));
3451}
3452
3453/*
3454 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3455 */
3456static void perf_output_read_group(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003457 struct perf_event *event,
3458 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003459{
3460 struct perf_event *leader = event->group_leader, *sub;
3461 u64 read_format = event->attr.read_format;
3462 u64 values[5];
3463 int n = 0;
3464
3465 values[n++] = 1 + leader->nr_siblings;
3466
3467 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
Stephane Eranianeed01522010-10-26 16:08:01 +02003468 values[n++] = enabled;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003469
3470 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
Stephane Eranianeed01522010-10-26 16:08:01 +02003471 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003472
3473 if (leader != event)
3474 leader->pmu->read(leader);
3475
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003476 values[n++] = perf_event_count(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003477 if (read_format & PERF_FORMAT_ID)
3478 values[n++] = primary_event_id(leader);
3479
3480 perf_output_copy(handle, values, n * sizeof(u64));
3481
3482 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3483 n = 0;
3484
3485 if (sub != event)
3486 sub->pmu->read(sub);
3487
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003488 values[n++] = perf_event_count(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003489 if (read_format & PERF_FORMAT_ID)
3490 values[n++] = primary_event_id(sub);
3491
3492 perf_output_copy(handle, values, n * sizeof(u64));
3493 }
3494}
3495
Stephane Eranianeed01522010-10-26 16:08:01 +02003496#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3497 PERF_FORMAT_TOTAL_TIME_RUNNING)
3498
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003499static void perf_output_read(struct perf_output_handle *handle,
3500 struct perf_event *event)
3501{
Stephane Eranianeed01522010-10-26 16:08:01 +02003502 u64 enabled = 0, running = 0, now, ctx_time;
3503 u64 read_format = event->attr.read_format;
3504
3505 /*
3506 * compute total_time_enabled, total_time_running
3507 * based on snapshot values taken when the event
3508 * was last scheduled in.
3509 *
3510	 * we cannot simply call update_context_time()
3511	 * because of locking issues, as we are called in
3512	 * NMI context
3513 */
3514 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3515 now = perf_clock();
3516 ctx_time = event->shadow_ctx_time + now;
3517 enabled = ctx_time - event->tstamp_enabled;
3518 running = ctx_time - event->tstamp_running;
3519 }
3520
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003521 if (event->attr.read_format & PERF_FORMAT_GROUP)
Stephane Eranianeed01522010-10-26 16:08:01 +02003522 perf_output_read_group(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003523 else
Stephane Eranianeed01522010-10-26 16:08:01 +02003524 perf_output_read_one(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003525}
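/*
 * What perf_output_read_one() emits, from the reader's point of view
 * (editor's sketch): a sequence of u64s whose presence depends on
 * read_format, in exactly the order written above.
 */
#if 0
#include <linux/perf_event.h>

struct read_one {
	__u64 value;		/* always present: perf_event_count()     */
	__u64 time_enabled;	/* iff PERF_FORMAT_TOTAL_TIME_ENABLED set */
	__u64 time_running;	/* iff PERF_FORMAT_TOTAL_TIME_RUNNING set */
	__u64 id;		/* iff PERF_FORMAT_ID set                 */
};

/* returns the number of u64s consumed */
static int parse_read_one(const __u64 *p, __u64 read_format, struct read_one *out)
{
	int n = 0;

	out->value = p[n++];
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		out->time_enabled = p[n++];
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		out->time_running = p[n++];
	if (read_format & PERF_FORMAT_ID)
		out->id = p[n++];

	return n;
}
#endif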
3526
3527void perf_output_sample(struct perf_output_handle *handle,
3528 struct perf_event_header *header,
3529 struct perf_sample_data *data,
3530 struct perf_event *event)
3531{
3532 u64 sample_type = data->type;
3533
3534 perf_output_put(handle, *header);
3535
3536 if (sample_type & PERF_SAMPLE_IP)
3537 perf_output_put(handle, data->ip);
3538
3539 if (sample_type & PERF_SAMPLE_TID)
3540 perf_output_put(handle, data->tid_entry);
3541
3542 if (sample_type & PERF_SAMPLE_TIME)
3543 perf_output_put(handle, data->time);
3544
3545 if (sample_type & PERF_SAMPLE_ADDR)
3546 perf_output_put(handle, data->addr);
3547
3548 if (sample_type & PERF_SAMPLE_ID)
3549 perf_output_put(handle, data->id);
3550
3551 if (sample_type & PERF_SAMPLE_STREAM_ID)
3552 perf_output_put(handle, data->stream_id);
3553
3554 if (sample_type & PERF_SAMPLE_CPU)
3555 perf_output_put(handle, data->cpu_entry);
3556
3557 if (sample_type & PERF_SAMPLE_PERIOD)
3558 perf_output_put(handle, data->period);
3559
3560 if (sample_type & PERF_SAMPLE_READ)
3561 perf_output_read(handle, event);
3562
3563 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3564 if (data->callchain) {
3565 int size = 1;
3566
3567			size += data->callchain->nr;
3569
3570 size *= sizeof(u64);
3571
3572 perf_output_copy(handle, data->callchain, size);
3573 } else {
3574 u64 nr = 0;
3575 perf_output_put(handle, nr);
3576 }
3577 }
3578
3579 if (sample_type & PERF_SAMPLE_RAW) {
3580 if (data->raw) {
3581 perf_output_put(handle, data->raw->size);
3582 perf_output_copy(handle, data->raw->data,
3583 data->raw->size);
3584 } else {
3585 struct {
3586 u32 size;
3587 u32 data;
3588 } raw = {
3589 .size = sizeof(u32),
3590 .data = 0,
3591 };
3592 perf_output_put(handle, raw);
3593 }
3594 }
3595}
3596
3597void perf_prepare_sample(struct perf_event_header *header,
3598 struct perf_sample_data *data,
3599 struct perf_event *event,
3600 struct pt_regs *regs)
3601{
3602 u64 sample_type = event->attr.sample_type;
3603
3604 data->type = sample_type;
3605
3606 header->type = PERF_RECORD_SAMPLE;
3607 header->size = sizeof(*header);
3608
3609 header->misc = 0;
3610 header->misc |= perf_misc_flags(regs);
3611
3612 if (sample_type & PERF_SAMPLE_IP) {
3613 data->ip = perf_instruction_pointer(regs);
3614
3615 header->size += sizeof(data->ip);
3616 }
3617
3618 if (sample_type & PERF_SAMPLE_TID) {
3619		/* namespaces: report pid/tid as seen from the event's pid ns */
3620 data->tid_entry.pid = perf_event_pid(event, current);
3621 data->tid_entry.tid = perf_event_tid(event, current);
3622
3623 header->size += sizeof(data->tid_entry);
3624 }
3625
3626 if (sample_type & PERF_SAMPLE_TIME) {
3627 data->time = perf_clock();
3628
3629 header->size += sizeof(data->time);
3630 }
3631
3632 if (sample_type & PERF_SAMPLE_ADDR)
3633 header->size += sizeof(data->addr);
3634
3635 if (sample_type & PERF_SAMPLE_ID) {
3636 data->id = primary_event_id(event);
3637
3638 header->size += sizeof(data->id);
3639 }
3640
3641 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3642 data->stream_id = event->id;
3643
3644 header->size += sizeof(data->stream_id);
3645 }
3646
3647 if (sample_type & PERF_SAMPLE_CPU) {
3648 data->cpu_entry.cpu = raw_smp_processor_id();
3649 data->cpu_entry.reserved = 0;
3650
3651 header->size += sizeof(data->cpu_entry);
3652 }
3653
3654 if (sample_type & PERF_SAMPLE_PERIOD)
3655 header->size += sizeof(data->period);
3656
3657 if (sample_type & PERF_SAMPLE_READ)
3658 header->size += perf_event_read_size(event);
3659
3660 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3661 int size = 1;
3662
3663 data->callchain = perf_callchain(regs);
3664
3665 if (data->callchain)
3666 size += data->callchain->nr;
3667
3668 header->size += size * sizeof(u64);
3669 }
3670
3671 if (sample_type & PERF_SAMPLE_RAW) {
3672 int size = sizeof(u32);
3673
3674 if (data->raw)
3675 size += data->raw->size;
3676 else
3677 size += sizeof(u32);
3678
3679 WARN_ON_ONCE(size & (sizeof(u64)-1));
3680 header->size += size;
3681 }
3682}
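/*
 * The size computed by perf_prepare_sample() must match, byte for
 * byte, what perf_output_sample() later writes.  The fixed-size part
 * can be derived from sample_type alone (editor's sketch; READ,
 * CALLCHAIN and RAW add variable-sized data on top):
 */
#if 0
static u16 sketch_fixed_sample_size(u64 st)
{
	u16 size = sizeof(struct perf_event_header);

	if (st & PERF_SAMPLE_IP)	size += sizeof(u64);
	if (st & PERF_SAMPLE_TID)	size += sizeof(u64); /* u32 pid + u32 tid */
	if (st & PERF_SAMPLE_TIME)	size += sizeof(u64);
	if (st & PERF_SAMPLE_ADDR)	size += sizeof(u64);
	if (st & PERF_SAMPLE_ID)	size += sizeof(u64);
	if (st & PERF_SAMPLE_STREAM_ID)	size += sizeof(u64);
	if (st & PERF_SAMPLE_CPU)	size += sizeof(u64); /* u32 cpu + u32 pad */
	if (st & PERF_SAMPLE_PERIOD)	size += sizeof(u64);

	return size;
}
#endif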
3683
3684static void perf_event_output(struct perf_event *event, int nmi,
3685 struct perf_sample_data *data,
3686 struct pt_regs *regs)
3687{
3688 struct perf_output_handle handle;
3689 struct perf_event_header header;
3690
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003691 /* protect the callchain buffers */
3692 rcu_read_lock();
3693
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003694 perf_prepare_sample(&header, data, event, regs);
3695
3696 if (perf_output_begin(&handle, event, header.size, nmi, 1))
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003697 goto exit;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003698
3699 perf_output_sample(&handle, &header, data, event);
3700
3701 perf_output_end(&handle);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003702
3703exit:
3704 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003705}
3706
3707/*
3708 * read event (PERF_RECORD_READ)
3709 */
3710
3711struct perf_read_event {
3712 struct perf_event_header header;
3713
3714 u32 pid;
3715 u32 tid;
3716};
3717
3718static void
3719perf_event_read_event(struct perf_event *event,
3720 struct task_struct *task)
3721{
3722 struct perf_output_handle handle;
3723 struct perf_read_event read_event = {
3724 .header = {
3725 .type = PERF_RECORD_READ,
3726 .misc = 0,
3727 .size = sizeof(read_event) + perf_event_read_size(event),
3728 },
3729 .pid = perf_event_pid(event, task),
3730 .tid = perf_event_tid(event, task),
3731 };
3732 int ret;
3733
3734 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3735 if (ret)
3736 return;
3737
3738 perf_output_put(&handle, read_event);
3739 perf_output_read(&handle, event);
3740
3741 perf_output_end(&handle);
3742}
3743
3744/*
3745 * task tracking -- fork/exit
3746 *
Eric B Munson3af9e852010-05-18 15:30:49 +01003747 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003748 */
3749
3750struct perf_task_event {
3751 struct task_struct *task;
3752 struct perf_event_context *task_ctx;
3753
3754 struct {
3755 struct perf_event_header header;
3756
3757 u32 pid;
3758 u32 ppid;
3759 u32 tid;
3760 u32 ptid;
3761 u64 time;
3762 } event_id;
3763};
3764
3765static void perf_event_task_output(struct perf_event *event,
3766 struct perf_task_event *task_event)
3767{
3768 struct perf_output_handle handle;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003769 struct task_struct *task = task_event->task;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01003770 int size, ret;
3771
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003772 size = task_event->event_id.header.size;
3773 ret = perf_output_begin(&handle, event, size, 0, 0);
3774
Peter Zijlstraef607772010-05-18 10:50:41 +02003775 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003776 return;
3777
3778 task_event->event_id.pid = perf_event_pid(event, task);
3779 task_event->event_id.ppid = perf_event_pid(event, current);
3780
3781 task_event->event_id.tid = perf_event_tid(event, task);
3782 task_event->event_id.ptid = perf_event_tid(event, current);
3783
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003784 perf_output_put(&handle, task_event->event_id);
3785
3786 perf_output_end(&handle);
3787}
3788
3789static int perf_event_task_match(struct perf_event *event)
3790{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003791 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003792 return 0;
3793
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003794 if (event->cpu != -1 && event->cpu != smp_processor_id())
3795 return 0;
3796
Eric B Munson3af9e852010-05-18 15:30:49 +01003797 if (event->attr.comm || event->attr.mmap ||
3798 event->attr.mmap_data || event->attr.task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003799 return 1;
3800
3801 return 0;
3802}
3803
3804static void perf_event_task_ctx(struct perf_event_context *ctx,
3805 struct perf_task_event *task_event)
3806{
3807 struct perf_event *event;
3808
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003809 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3810 if (perf_event_task_match(event))
3811 perf_event_task_output(event, task_event);
3812 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003813}
3814
3815static void perf_event_task_event(struct perf_task_event *task_event)
3816{
3817 struct perf_cpu_context *cpuctx;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003818 struct perf_event_context *ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003819 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003820 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003821
Peter Zijlstrad6ff86c2009-11-20 22:19:46 +01003822 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003823 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02003824 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003825 perf_event_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003826
3827 ctx = task_event->task_ctx;
3828 if (!ctx) {
3829 ctxn = pmu->task_ctx_nr;
3830 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02003831 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003832 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3833 }
3834 if (ctx)
3835 perf_event_task_ctx(ctx, task_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02003836next:
3837 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003838 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003839 rcu_read_unlock();
3840}
3841
3842static void perf_event_task(struct task_struct *task,
3843 struct perf_event_context *task_ctx,
3844 int new)
3845{
3846 struct perf_task_event task_event;
3847
3848 if (!atomic_read(&nr_comm_events) &&
3849 !atomic_read(&nr_mmap_events) &&
3850 !atomic_read(&nr_task_events))
3851 return;
3852
3853 task_event = (struct perf_task_event){
3854 .task = task,
3855 .task_ctx = task_ctx,
3856 .event_id = {
3857 .header = {
3858 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3859 .misc = 0,
3860 .size = sizeof(task_event.event_id),
3861 },
3862 /* .pid */
3863 /* .ppid */
3864 /* .tid */
3865 /* .ptid */
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003866 .time = perf_clock(),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003867 },
3868 };
3869
3870 perf_event_task_event(&task_event);
3871}
3872
3873void perf_event_fork(struct task_struct *task)
3874{
3875 perf_event_task(task, NULL, 1);
3876}
3877
3878/*
3879 * comm tracking
3880 */
3881
3882struct perf_comm_event {
3883 struct task_struct *task;
3884 char *comm;
3885 int comm_size;
3886
3887 struct {
3888 struct perf_event_header header;
3889
3890 u32 pid;
3891 u32 tid;
3892 } event_id;
3893};
3894
3895static void perf_event_comm_output(struct perf_event *event,
3896 struct perf_comm_event *comm_event)
3897{
3898 struct perf_output_handle handle;
3899 int size = comm_event->event_id.header.size;
3900 int ret = perf_output_begin(&handle, event, size, 0, 0);
3901
3902 if (ret)
3903 return;
3904
3905 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3906 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3907
3908 perf_output_put(&handle, comm_event->event_id);
3909 perf_output_copy(&handle, comm_event->comm,
3910 comm_event->comm_size);
3911 perf_output_end(&handle);
3912}
3913
3914static int perf_event_comm_match(struct perf_event *event)
3915{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003916 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003917 return 0;
3918
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003919 if (event->cpu != -1 && event->cpu != smp_processor_id())
3920 return 0;
3921
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003922 if (event->attr.comm)
3923 return 1;
3924
3925 return 0;
3926}
3927
3928static void perf_event_comm_ctx(struct perf_event_context *ctx,
3929 struct perf_comm_event *comm_event)
3930{
3931 struct perf_event *event;
3932
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003933 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3934 if (perf_event_comm_match(event))
3935 perf_event_comm_output(event, comm_event);
3936 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003937}
3938
3939static void perf_event_comm_event(struct perf_comm_event *comm_event)
3940{
3941 struct perf_cpu_context *cpuctx;
3942 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003943 char comm[TASK_COMM_LEN];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003944 unsigned int size;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003945 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003946 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003947
3948 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01003949 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003950 size = ALIGN(strlen(comm)+1, sizeof(u64));
3951
3952 comm_event->comm = comm;
3953 comm_event->comm_size = size;
3954
3955 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3956
Peter Zijlstraf6595f32009-11-20 22:19:47 +01003957 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003958 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02003959 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003960 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003961
3962 ctxn = pmu->task_ctx_nr;
3963 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02003964 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003965
3966 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3967 if (ctx)
3968 perf_event_comm_ctx(ctx, comm_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02003969next:
3970 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003971 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003972 rcu_read_unlock();
3973}
3974
3975void perf_event_comm(struct task_struct *task)
3976{
3977 struct perf_comm_event comm_event;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003978 struct perf_event_context *ctx;
3979 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003980
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003981 for_each_task_context_nr(ctxn) {
3982 ctx = task->perf_event_ctxp[ctxn];
3983 if (!ctx)
3984 continue;
3985
3986 perf_event_enable_on_exec(ctx);
3987 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003988
3989 if (!atomic_read(&nr_comm_events))
3990 return;
3991
3992 comm_event = (struct perf_comm_event){
3993 .task = task,
3994 /* .comm */
3995 /* .comm_size */
3996 .event_id = {
3997 .header = {
3998 .type = PERF_RECORD_COMM,
3999 .misc = 0,
4000 /* .size */
4001 },
4002 /* .pid */
4003 /* .tid */
4004 },
4005 };
4006
4007 perf_event_comm_event(&comm_event);
4008}
4009
4010/*
4011 * mmap tracking
4012 */
4013
4014struct perf_mmap_event {
4015 struct vm_area_struct *vma;
4016
4017 const char *file_name;
4018 int file_size;
4019
4020 struct {
4021 struct perf_event_header header;
4022
4023 u32 pid;
4024 u32 tid;
4025 u64 start;
4026 u64 len;
4027 u64 pgoff;
4028 } event_id;
4029};
4030
4031static void perf_event_mmap_output(struct perf_event *event,
4032 struct perf_mmap_event *mmap_event)
4033{
4034 struct perf_output_handle handle;
4035 int size = mmap_event->event_id.header.size;
4036 int ret = perf_output_begin(&handle, event, size, 0, 0);
4037
4038 if (ret)
4039 return;
4040
4041 mmap_event->event_id.pid = perf_event_pid(event, current);
4042 mmap_event->event_id.tid = perf_event_tid(event, current);
4043
4044 perf_output_put(&handle, mmap_event->event_id);
4045 perf_output_copy(&handle, mmap_event->file_name,
4046 mmap_event->file_size);
4047 perf_output_end(&handle);
4048}
4049
4050static int perf_event_mmap_match(struct perf_event *event,
Eric B Munson3af9e852010-05-18 15:30:49 +01004051 struct perf_mmap_event *mmap_event,
4052 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004053{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004054 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004055 return 0;
4056
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004057 if (event->cpu != -1 && event->cpu != smp_processor_id())
4058 return 0;
4059
Eric B Munson3af9e852010-05-18 15:30:49 +01004060 if ((!executable && event->attr.mmap_data) ||
4061 (executable && event->attr.mmap))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004062 return 1;
4063
4064 return 0;
4065}
4066
4067static void perf_event_mmap_ctx(struct perf_event_context *ctx,
Eric B Munson3af9e852010-05-18 15:30:49 +01004068 struct perf_mmap_event *mmap_event,
4069 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004070{
4071 struct perf_event *event;
4072
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004073 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Eric B Munson3af9e852010-05-18 15:30:49 +01004074 if (perf_event_mmap_match(event, mmap_event, executable))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004075 perf_event_mmap_output(event, mmap_event);
4076 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004077}
4078
4079static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4080{
4081 struct perf_cpu_context *cpuctx;
4082 struct perf_event_context *ctx;
4083 struct vm_area_struct *vma = mmap_event->vma;
4084 struct file *file = vma->vm_file;
4085 unsigned int size;
4086 char tmp[16];
4087 char *buf = NULL;
4088 const char *name;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004089 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004090 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004091
4092 memset(tmp, 0, sizeof(tmp));
4093
4094 if (file) {
4095 /*
4096 * d_path works from the end of the buffer backwards, so we
4097 * need to add enough zero bytes after the string to handle
4098 * the 64bit alignment we do later.
4099 */
4100 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4101 if (!buf) {
4102 name = strncpy(tmp, "//enomem", sizeof(tmp));
4103 goto got_name;
4104 }
4105 name = d_path(&file->f_path, buf, PATH_MAX);
4106 if (IS_ERR(name)) {
4107 name = strncpy(tmp, "//toolong", sizeof(tmp));
4108 goto got_name;
4109 }
4110 } else {
4111 if (arch_vma_name(mmap_event->vma)) {
4112 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4113 sizeof(tmp));
4114 goto got_name;
4115 }
4116
4117 if (!vma->vm_mm) {
4118 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4119 goto got_name;
Eric B Munson3af9e852010-05-18 15:30:49 +01004120 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4121 vma->vm_end >= vma->vm_mm->brk) {
4122 name = strncpy(tmp, "[heap]", sizeof(tmp));
4123 goto got_name;
4124 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4125 vma->vm_end >= vma->vm_mm->start_stack) {
4126 name = strncpy(tmp, "[stack]", sizeof(tmp));
4127 goto got_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004128 }
4129
4130 name = strncpy(tmp, "//anon", sizeof(tmp));
4131 goto got_name;
4132 }
4133
4134got_name:
4135 size = ALIGN(strlen(name)+1, sizeof(u64));
4136
4137 mmap_event->file_name = name;
4138 mmap_event->file_size = size;
4139
4140 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4141
Peter Zijlstraf6d9dd22009-11-20 22:19:48 +01004142 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004143 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004144 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004145 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4146 vma->vm_flags & VM_EXEC);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004147
4148 ctxn = pmu->task_ctx_nr;
4149 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004150 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004151
4152 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4153 if (ctx) {
4154 perf_event_mmap_ctx(ctx, mmap_event,
4155 vma->vm_flags & VM_EXEC);
4156 }
Peter Zijlstra41945f62010-09-16 19:17:24 +02004157next:
4158 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004159 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004160 rcu_read_unlock();
4161
4162 kfree(buf);
4163}
4164
Eric B Munson3af9e852010-05-18 15:30:49 +01004165void perf_event_mmap(struct vm_area_struct *vma)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004166{
4167 struct perf_mmap_event mmap_event;
4168
4169 if (!atomic_read(&nr_mmap_events))
4170 return;
4171
4172 mmap_event = (struct perf_mmap_event){
4173 .vma = vma,
4174 /* .file_name */
4175 /* .file_size */
4176 .event_id = {
4177 .header = {
4178 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08004179 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004180 /* .size */
4181 },
4182 /* .pid */
4183 /* .tid */
4184 .start = vma->vm_start,
4185 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01004186 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004187 },
4188 };
4189
4190 perf_event_mmap_event(&mmap_event);
4191}
4192
4193/*
4194 * IRQ throttle logging
4195 */
4196
4197static void perf_log_throttle(struct perf_event *event, int enable)
4198{
4199 struct perf_output_handle handle;
4200 int ret;
4201
4202 struct {
4203 struct perf_event_header header;
4204 u64 time;
4205 u64 id;
4206 u64 stream_id;
4207 } throttle_event = {
4208 .header = {
4209 .type = PERF_RECORD_THROTTLE,
4210 .misc = 0,
4211 .size = sizeof(throttle_event),
4212 },
4213 .time = perf_clock(),
4214 .id = primary_event_id(event),
4215 .stream_id = event->id,
4216 };
4217
4218 if (enable)
4219 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4220
4221 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
4222 if (ret)
4223 return;
4224
4225 perf_output_put(&handle, throttle_event);
4226 perf_output_end(&handle);
4227}
4228
4229/*
4230 * Generic event overflow handling, sampling.
4231 */
4232
4233static int __perf_event_overflow(struct perf_event *event, int nmi,
4234 int throttle, struct perf_sample_data *data,
4235 struct pt_regs *regs)
4236{
4237 int events = atomic_read(&event->event_limit);
4238 struct hw_perf_event *hwc = &event->hw;
4239 int ret = 0;
4240
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004241 if (!throttle) {
4242 hwc->interrupts++;
4243 } else {
4244 if (hwc->interrupts != MAX_INTERRUPTS) {
4245 hwc->interrupts++;
4246 if (HZ * hwc->interrupts >
4247 (u64)sysctl_perf_event_sample_rate) {
4248 hwc->interrupts = MAX_INTERRUPTS;
4249 perf_log_throttle(event, 0);
4250 ret = 1;
4251 }
4252 } else {
4253 /*
4254 * Keep re-disabling events even though on the previous
4255 * pass we disabled it - just in case we raced with a
4256 * sched-in and the event got enabled again:
4257 */
4258 ret = 1;
4259 }
4260 }
4261
4262 if (event->attr.freq) {
4263 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01004264 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004265
Peter Zijlstraabd50712010-01-26 18:50:16 +01004266 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004267
Peter Zijlstraabd50712010-01-26 18:50:16 +01004268 if (delta > 0 && delta < 2*TICK_NSEC)
4269 perf_adjust_period(event, delta, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004270 }
4271
4272 /*
4273 * XXX event_limit might not quite work as expected on inherited
4274 * events
4275 */
4276
4277 event->pending_kill = POLL_IN;
4278 if (events && atomic_dec_and_test(&event->event_limit)) {
4279 ret = 1;
4280 event->pending_kill = POLL_HUP;
4281 if (nmi) {
4282 event->pending_disable = 1;
Peter Zijlstrae360adb2010-10-14 14:01:34 +08004283 irq_work_queue(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004284 } else
4285 perf_event_disable(event);
4286 }
4287
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004288 if (event->overflow_handler)
4289 event->overflow_handler(event, nmi, data, regs);
4290 else
4291 perf_event_output(event, nmi, data, regs);
4292
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004293 return ret;
4294}
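/*
 * The throttle test above extrapolates per-tick interrupt counts to a
 * per-second rate: an event is throttled once HZ * interrupts exceeds
 * sysctl_perf_event_sample_rate.  Worked example (editor's sketch):
 * with HZ == 1000 and the default sysctl of 100000, throttling kicks
 * in after 100000 / 1000 == 100 interrupts within a single tick.
 */
#if 0
static unsigned int sketch_max_interrupts_per_tick(void)
{
	return sysctl_perf_event_sample_rate / HZ;
}
#endif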
4295
4296int perf_event_overflow(struct perf_event *event, int nmi,
4297 struct perf_sample_data *data,
4298 struct pt_regs *regs)
4299{
4300 return __perf_event_overflow(event, nmi, 1, data, regs);
4301}
4302
4303/*
4304 * Generic software event infrastructure
4305 */
4306
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004307struct swevent_htable {
4308 struct swevent_hlist *swevent_hlist;
4309 struct mutex hlist_mutex;
4310 int hlist_refcount;
4311
4312 /* Recursion avoidance in each contexts */
4313 int recursion[PERF_NR_CONTEXTS];
4314};
4315
4316static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4317
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004318/*
4319 * We directly increment event->count and keep a second value in
4320 * event->hw.period_left to count intervals. This period counter
4321 * is kept in the range [-sample_period, 0] so that we can use the
4322 * sign as a trigger.
4323 */
4324
4325static u64 perf_swevent_set_period(struct perf_event *event)
4326{
4327 struct hw_perf_event *hwc = &event->hw;
4328 u64 period = hwc->last_period;
4329 u64 nr, offset;
4330 s64 old, val;
4331
4332 hwc->last_period = hwc->sample_period;
4333
4334again:
Peter Zijlstrae7850592010-05-21 14:43:08 +02004335 old = val = local64_read(&hwc->period_left);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004336 if (val < 0)
4337 return 0;
4338
4339 nr = div64_u64(period + val, period);
4340 offset = nr * period;
4341 val -= offset;
Peter Zijlstrae7850592010-05-21 14:43:08 +02004342 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004343 goto again;
4344
4345 return nr;
4346}
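/*
 * Worked example of the arithmetic above (editor's sketch): with
 * sample_period == 100 and period_left having climbed to +250,
 *
 *	nr     = (100 + 250) / 100 = 3	overflows to report,
 *	offset = 3 * 100           = 300,
 *	val    = 250 - 300         = -50,
 *
 * i.e. three periods elapsed and 50 events already count towards the
 * next one, keeping period_left within (-sample_period, 0].
 */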
4347
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004348static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004349 int nmi, struct perf_sample_data *data,
4350 struct pt_regs *regs)
4351{
4352 struct hw_perf_event *hwc = &event->hw;
4353 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004354
4355 data->period = event->hw.last_period;
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004356 if (!overflow)
4357 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004358
4359 if (hwc->interrupts == MAX_INTERRUPTS)
4360 return;
4361
4362 for (; overflow; overflow--) {
4363 if (__perf_event_overflow(event, nmi, throttle,
4364 data, regs)) {
4365 /*
4366 * We inhibit the overflow from happening when
4367 * hwc->interrupts == MAX_INTERRUPTS.
4368 */
4369 break;
4370 }
4371 throttle = 1;
4372 }
4373}
4374
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004375static void perf_swevent_event(struct perf_event *event, u64 nr,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004376 int nmi, struct perf_sample_data *data,
4377 struct pt_regs *regs)
4378{
4379 struct hw_perf_event *hwc = &event->hw;
4380
Peter Zijlstrae7850592010-05-21 14:43:08 +02004381 local64_add(nr, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004382
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004383 if (!regs)
4384 return;
4385
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004386 if (!hwc->sample_period)
4387 return;
4388
4389 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4390 return perf_swevent_overflow(event, 1, nmi, data, regs);
4391
Peter Zijlstrae7850592010-05-21 14:43:08 +02004392 if (local64_add_negative(nr, &hwc->period_left))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004393 return;
4394
4395 perf_swevent_overflow(event, 0, nmi, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004396}
4397
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004398static int perf_exclude_event(struct perf_event *event,
4399 struct pt_regs *regs)
4400{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004401 if (event->hw.state & PERF_HES_STOPPED)
4402 return 0;
4403
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004404 if (regs) {
4405 if (event->attr.exclude_user && user_mode(regs))
4406 return 1;
4407
4408 if (event->attr.exclude_kernel && !user_mode(regs))
4409 return 1;
4410 }
4411
4412 return 0;
4413}
4414
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004415static int perf_swevent_match(struct perf_event *event,
4416 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08004417 u32 event_id,
4418 struct perf_sample_data *data,
4419 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004420{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004421 if (event->attr.type != type)
4422 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004423
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004424 if (event->attr.config != event_id)
4425 return 0;
4426
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004427 if (perf_exclude_event(event, regs))
4428 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004429
4430 return 1;
4431}
4432
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004433static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004434{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004435 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004436
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004437 return hash_64(val, SWEVENT_HLIST_BITS);
4438}
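/*
 * Editor's note: folding the type into the upper half of the key keeps
 * a software event and, say, a tracepoint that happen to share the
 * same numeric id from colliding systematically:
 *
 *	key    = event_id | (type << 32);
 *	bucket = hash_64(key, SWEVENT_HLIST_BITS);
 */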
4439
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004440static inline struct hlist_head *
4441__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004442{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004443 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004444
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004445 return &hlist->heads[hash];
4446}
4447
4448/* For the read side: lookups by events when they trigger */
4449static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004450find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004451{
4452 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004453
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004454 hlist = rcu_dereference(swhash->swevent_hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004455 if (!hlist)
4456 return NULL;
4457
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004458 return __find_swevent_head(hlist, type, event_id);
4459}
4460
4461/* For the event head insertion and removal in the hlist */
4462static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004463find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004464{
4465 struct swevent_hlist *hlist;
4466 u32 event_id = event->attr.config;
4467 u64 type = event->attr.type;
4468
4469 /*
4470 * Event scheduling is always serialized against hlist allocation
4471	 * and release, which makes the protected version suitable here.
4472 * The context lock guarantees that.
4473 */
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004474 hlist = rcu_dereference_protected(swhash->swevent_hlist,
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004475 lockdep_is_held(&event->ctx->lock));
4476 if (!hlist)
4477 return NULL;
4478
4479 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004480}
4481
4482static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4483 u64 nr, int nmi,
4484 struct perf_sample_data *data,
4485 struct pt_regs *regs)
4486{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004487 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004488 struct perf_event *event;
4489 struct hlist_node *node;
4490 struct hlist_head *head;
4491
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004492 rcu_read_lock();
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004493 head = find_swevent_head_rcu(swhash, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004494 if (!head)
4495 goto end;
4496
4497 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08004498 if (perf_swevent_match(event, type, event_id, data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004499 perf_swevent_event(event, nr, nmi, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004500 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004501end:
4502 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004503}
4504
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004505int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004506{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004507 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004508
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004509 return get_recursion_context(swhash->recursion);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004510}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01004511EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004512
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004513inline void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004514{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004515 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004516
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004517 put_recursion_context(swhash->recursion, rctx);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004518}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004519
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004520void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4521 struct pt_regs *regs, u64 addr)
4522{
Ingo Molnara4234bf2009-11-23 10:57:59 +01004523 struct perf_sample_data data;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004524 int rctx;
4525
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004526 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004527 rctx = perf_swevent_get_recursion_context();
4528 if (rctx < 0)
4529 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004530
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004531 perf_sample_data_init(&data, addr);
Ingo Molnara4234bf2009-11-23 10:57:59 +01004532
4533 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004534
4535 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004536 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004537}
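/*
 * Example call site (illustrative sketch, not part of this file):
 * architecture fault handlers feed software events through the
 * perf_sw_event() inline wrapper around __perf_sw_event(), roughly
 * along the lines of:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * i.e. one occurrence (nr = 1), not in NMI context, with the faulting
 * address recorded as the sample addr.
 */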
4538
4539static void perf_swevent_read(struct perf_event *event)
4540{
4541}
4542
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004543static int perf_swevent_add(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004544{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004545 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004546 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004547 struct hlist_head *head;
4548
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004549 if (hwc->sample_period) {
4550 hwc->last_period = hwc->sample_period;
4551 perf_swevent_set_period(event);
4552 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004553
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004554 hwc->state = !(flags & PERF_EF_START);
4555
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004556 head = find_swevent_head(swhash, event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004557 if (WARN_ON_ONCE(!head))
4558 return -EINVAL;
4559
4560 hlist_add_head_rcu(&event->hlist_entry, head);
4561
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004562 return 0;
4563}
4564
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004565static void perf_swevent_del(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004566{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004567 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004568}
4569
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004570static void perf_swevent_start(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004571{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004572 event->hw.state = 0;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004573}
4574
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004575static void perf_swevent_stop(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004576{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004577 event->hw.state = PERF_HES_STOPPED;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004578}
4579
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004580/* Deref the hlist from the update side */
4581static inline struct swevent_hlist *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004582swevent_hlist_deref(struct swevent_htable *swhash)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004583{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004584 return rcu_dereference_protected(swhash->swevent_hlist,
4585 lockdep_is_held(&swhash->hlist_mutex));
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004586}
4587
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004588static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4589{
4590 struct swevent_hlist *hlist;
4591
4592 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4593 kfree(hlist);
4594}
4595
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004596static void swevent_hlist_release(struct swevent_htable *swhash)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004597{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004598 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004599
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004600 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004601 return;
4602
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004603 rcu_assign_pointer(swhash->swevent_hlist, NULL);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004604 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4605}
4606
4607static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4608{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004609 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004610
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004611 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004612
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004613 if (!--swhash->hlist_refcount)
4614 swevent_hlist_release(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004615
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004616 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004617}
4618
4619static void swevent_hlist_put(struct perf_event *event)
4620{
4621 int cpu;
4622
4623 if (event->cpu != -1) {
4624 swevent_hlist_put_cpu(event, event->cpu);
4625 return;
4626 }
4627
4628 for_each_possible_cpu(cpu)
4629 swevent_hlist_put_cpu(event, cpu);
4630}
4631
4632static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4633{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004634 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004635 int err = 0;
4636
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004637 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004638
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004639 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004640 struct swevent_hlist *hlist;
4641
4642 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4643 if (!hlist) {
4644 err = -ENOMEM;
4645 goto exit;
4646 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004647 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004648 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004649 swhash->hlist_refcount++;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004650exit:
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004651 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004652
4653 return err;
4654}
4655
4656static int swevent_hlist_get(struct perf_event *event)
4657{
4658 int err;
4659 int cpu, failed_cpu;
4660
4661 if (event->cpu != -1)
4662 return swevent_hlist_get_cpu(event, event->cpu);
4663
4664 get_online_cpus();
4665 for_each_possible_cpu(cpu) {
4666 err = swevent_hlist_get_cpu(event, cpu);
4667 if (err) {
4668 failed_cpu = cpu;
4669 goto fail;
4670 }
4671 }
4672 put_online_cpus();
4673
4674 return 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004675fail:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004676 for_each_possible_cpu(cpu) {
4677 if (cpu == failed_cpu)
4678 break;
4679 swevent_hlist_put_cpu(event, cpu);
4680 }
4681
4682 put_online_cpus();
4683 return err;
4684}
4685
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004686atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004687
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004688static void sw_perf_event_destroy(struct perf_event *event)
4689{
4690 u64 event_id = event->attr.config;
4691
4692 WARN_ON(event->parent);
4693
Peter Zijlstra7e54a5a2010-10-14 22:32:45 +02004694 jump_label_dec(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004695 swevent_hlist_put(event);
4696}
4697
4698static int perf_swevent_init(struct perf_event *event)
4699{
4700 int event_id = event->attr.config;
4701
4702 if (event->attr.type != PERF_TYPE_SOFTWARE)
4703 return -ENOENT;
4704
4705 switch (event_id) {
4706 case PERF_COUNT_SW_CPU_CLOCK:
4707 case PERF_COUNT_SW_TASK_CLOCK:
4708 return -ENOENT;
4709
4710 default:
4711 break;
4712 }
4713
4714	if (event_id >= PERF_COUNT_SW_MAX)
4715 return -ENOENT;
4716
4717 if (!event->parent) {
4718 int err;
4719
4720 err = swevent_hlist_get(event);
4721 if (err)
4722 return err;
4723
Peter Zijlstra7e54a5a2010-10-14 22:32:45 +02004724 jump_label_inc(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004725 event->destroy = sw_perf_event_destroy;
4726 }
4727
4728 return 0;
4729}
4730
4731static struct pmu perf_swevent = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02004732 .task_ctx_nr = perf_sw_context,
4733
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004734 .event_init = perf_swevent_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004735 .add = perf_swevent_add,
4736 .del = perf_swevent_del,
4737 .start = perf_swevent_start,
4738 .stop = perf_swevent_stop,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004739 .read = perf_swevent_read,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004740};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004741
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004742#ifdef CONFIG_EVENT_TRACING
4743
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004744static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004745 struct perf_sample_data *data)
4746{
4747 void *record = data->raw->data;
4748
4749 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4750 return 1;
4751 return 0;
4752}
4753
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004754static int perf_tp_event_match(struct perf_event *event,
4755 struct perf_sample_data *data,
4756 struct pt_regs *regs)
4757{
Peter Zijlstra580d6072010-05-20 20:54:31 +02004758 /*
4759 * All tracepoints are from kernel-space.
4760 */
4761 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004762 return 0;
4763
4764 if (!perf_tp_filter_match(event, data))
4765 return 0;
4766
4767 return 1;
4768}
4769
4770void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004771 struct pt_regs *regs, struct hlist_head *head, int rctx)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004772{
4773 struct perf_sample_data data;
4774 struct perf_event *event;
4775 struct hlist_node *node;
4776
4777 struct perf_raw_record raw = {
4778 .size = entry_size,
4779 .data = record,
4780 };
4781
4782 perf_sample_data_init(&data, addr);
4783 data.raw = &raw;
4784
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004785 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4786 if (perf_tp_event_match(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004787 perf_swevent_event(event, count, 1, &data, regs);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004788 }
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004789
4790 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004791}
4792EXPORT_SYMBOL_GPL(perf_tp_event);
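/*
 * Caller sketch (illustrative, simplified): the tracepoint glue generated by
 * TRACE_EVENT() builds a raw record, takes a recursion context, and hands
 * both to perf_tp_event(), roughly:
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	(fill in *entry)
 *	perf_tp_event(addr, count, entry, size, regs, head, rctx);
 *
 * perf_tp_event() then drops the recursion context, as seen above.
 * 'event_type', 'size' and 'head' stand in for the generated code's
 * actual locals.
 */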
4793
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004794static void tp_perf_event_destroy(struct perf_event *event)
4795{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004796 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004797}
4798
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004799static int perf_tp_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004800{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004801 int err;
4802
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004803 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4804 return -ENOENT;
4805
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004806 /*
4807	 * Raw tracepoint data is a severe data leak; only allow root to
4808 * have these.
4809 */
4810 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4811 perf_paranoid_tracepoint_raw() &&
4812 !capable(CAP_SYS_ADMIN))
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004813 return -EPERM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004814
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004815 err = perf_trace_init(event);
4816 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004817 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004818
4819 event->destroy = tp_perf_event_destroy;
4820
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004821 return 0;
4822}
4823
4824static struct pmu perf_tracepoint = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02004825 .task_ctx_nr = perf_sw_context,
4826
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004827 .event_init = perf_tp_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004828 .add = perf_trace_add,
4829 .del = perf_trace_del,
4830 .start = perf_swevent_start,
4831 .stop = perf_swevent_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004832 .read = perf_swevent_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004833};
4834
4835static inline void perf_tp_register(void)
4836{
4837 perf_pmu_register(&perf_tracepoint);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004838}
Li Zefan6fb29152009-10-15 11:21:42 +08004839
4840static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4841{
4842 char *filter_str;
4843 int ret;
4844
4845 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4846 return -EINVAL;
4847
4848 filter_str = strndup_user(arg, PAGE_SIZE);
4849 if (IS_ERR(filter_str))
4850 return PTR_ERR(filter_str);
4851
4852 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4853
4854 kfree(filter_str);
4855 return ret;
4856}
4857
4858static void perf_event_free_filter(struct perf_event *event)
4859{
4860 ftrace_profile_free_filter(event);
4861}
4862
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004863#else
Li Zefan6fb29152009-10-15 11:21:42 +08004864
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004865static inline void perf_tp_register(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004866{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004867}
Li Zefan6fb29152009-10-15 11:21:42 +08004868
4869static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4870{
4871 return -ENOENT;
4872}
4873
4874static void perf_event_free_filter(struct perf_event *event)
4875{
4876}
4877
Li Zefan07b139c2009-12-21 14:27:35 +08004878#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004879
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004880#ifdef CONFIG_HAVE_HW_BREAKPOINT
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004881void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004882{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004883 struct perf_sample_data sample;
4884 struct pt_regs *regs = data;
4885
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004886 perf_sample_data_init(&sample, bp->attr.bp_addr);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004887
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004888 if (!bp->hw.state && !perf_exclude_event(bp, regs))
4889 perf_swevent_event(bp, 1, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004890}
4891#endif
4892
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004893/*
4894 * hrtimer based swevent callback
4895 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004896
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004897static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004898{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004899 enum hrtimer_restart ret = HRTIMER_RESTART;
4900 struct perf_sample_data data;
4901 struct pt_regs *regs;
4902 struct perf_event *event;
4903 u64 period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004904
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004905 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4906 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004907
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004908 perf_sample_data_init(&data, 0);
4909 data.period = event->hw.last_period;
4910 regs = get_irq_regs();
4911
4912 if (regs && !perf_exclude_event(event, regs)) {
4913 if (!(event->attr.exclude_idle && current->pid == 0))
4914 if (perf_event_overflow(event, 0, &data, regs))
4915 ret = HRTIMER_NORESTART;
4916 }
4917
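	/*
	 * Re-arm the timer: the period is in nanoseconds and is clamped
	 * to at least 10 usec so that a tiny sample_period cannot flood
	 * the CPU with hrtimer interrupts.
	 */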
4918 period = max_t(u64, 10000, event->hw.sample_period);
4919 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4920
4921 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004922}
4923
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004924static void perf_swevent_start_hrtimer(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004925{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004926 struct hw_perf_event *hwc = &event->hw;
4927
4928 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4929 hwc->hrtimer.function = perf_swevent_hrtimer;
4930 if (hwc->sample_period) {
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004931 s64 period = local64_read(&hwc->period_left);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004932
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004933 if (period) {
4934 if (period < 0)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004935 period = 10000;
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004936
4937 local64_set(&hwc->period_left, 0);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004938 } else {
4939 period = max_t(u64, 10000, hwc->sample_period);
4940 }
4941 __hrtimer_start_range_ns(&hwc->hrtimer,
4942 ns_to_ktime(period), 0,
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02004943 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004944 }
4945}
4946
4947static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4948{
4949 struct hw_perf_event *hwc = &event->hw;
4950
4951 if (hwc->sample_period) {
4952 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004953 local64_set(&hwc->period_left, ktime_to_ns(remaining));
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004954
4955 hrtimer_cancel(&hwc->hrtimer);
4956 }
4957}
4958
4959/*
4960 * Software event: cpu wall time clock
4961 */
4962
4963static void cpu_clock_event_update(struct perf_event *event)
4964{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004965 s64 prev;
4966 u64 now;
4967
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004968 now = local_clock();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004969 prev = local64_xchg(&event->hw.prev_count, now);
4970 local64_add(now - prev, &event->count);
4971}
4972
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004973static void cpu_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004974{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004975 local64_set(&event->hw.prev_count, local_clock());
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004976 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004977}
4978
4979static void cpu_clock_event_stop(struct perf_event *event, int flags)
4980{
4981 perf_swevent_cancel_hrtimer(event);
4982 cpu_clock_event_update(event);
4983}
4984
4985static int cpu_clock_event_add(struct perf_event *event, int flags)
4986{
4987 if (flags & PERF_EF_START)
4988 cpu_clock_event_start(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004989
4990 return 0;
4991}
4992
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004993static void cpu_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004994{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004995 cpu_clock_event_stop(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004996}
4997
4998static void cpu_clock_event_read(struct perf_event *event)
4999{
5000 cpu_clock_event_update(event);
5001}
5002
5003static int cpu_clock_event_init(struct perf_event *event)
5004{
5005 if (event->attr.type != PERF_TYPE_SOFTWARE)
5006 return -ENOENT;
5007
5008 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5009 return -ENOENT;
5010
5011 return 0;
5012}
5013
5014static struct pmu perf_cpu_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005015 .task_ctx_nr = perf_sw_context,
5016
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005017 .event_init = cpu_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005018 .add = cpu_clock_event_add,
5019 .del = cpu_clock_event_del,
5020 .start = cpu_clock_event_start,
5021 .stop = cpu_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005022 .read = cpu_clock_event_read,
5023};
5024
5025/*
5026 * Software event: task time clock
5027 */
5028
5029static void task_clock_event_update(struct perf_event *event, u64 now)
5030{
5031 u64 prev;
5032 s64 delta;
5033
5034 prev = local64_xchg(&event->hw.prev_count, now);
5035 delta = now - prev;
5036 local64_add(delta, &event->count);
5037}
5038
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005039static void task_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005040{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005041 local64_set(&event->hw.prev_count, event->ctx->time);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005042 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005043}
5044
5045static void task_clock_event_stop(struct perf_event *event, int flags)
5046{
5047 perf_swevent_cancel_hrtimer(event);
5048 task_clock_event_update(event, event->ctx->time);
5049}
5050
5051static int task_clock_event_add(struct perf_event *event, int flags)
5052{
5053 if (flags & PERF_EF_START)
5054 task_clock_event_start(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005055
5056 return 0;
5057}
5058
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005059static void task_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005060{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005061 task_clock_event_stop(event, PERF_EF_UPDATE);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005062}
5063
5064static void task_clock_event_read(struct perf_event *event)
5065{
5066 u64 time;
5067
5068 if (!in_nmi()) {
5069 update_context_time(event->ctx);
5070 time = event->ctx->time;
5071 } else {
5072 u64 now = perf_clock();
5073 u64 delta = now - event->ctx->timestamp;
5074 time = event->ctx->time + delta;
5075 }
5076
5077 task_clock_event_update(event, time);
5078}
5079
5080static int task_clock_event_init(struct perf_event *event)
5081{
5082 if (event->attr.type != PERF_TYPE_SOFTWARE)
5083 return -ENOENT;
5084
5085 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5086 return -ENOENT;
5087
5088 return 0;
5089}
5090
5091static struct pmu perf_task_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005092 .task_ctx_nr = perf_sw_context,
5093
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005094 .event_init = task_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005095 .add = task_clock_event_add,
5096 .del = task_clock_event_del,
5097 .start = task_clock_event_start,
5098 .stop = task_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005099 .read = task_clock_event_read,
5100};
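/*
 * Note (descriptive): the two clock events differ only in their time base.
 * cpu-clock samples local_clock(), i.e. per-cpu wall time, while task-clock
 * follows ctx->time, which only advances while the monitored task's context
 * is scheduled in.
 */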
5101
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005102static void perf_pmu_nop_void(struct pmu *pmu)
5103{
5104}
5105
5106static int perf_pmu_nop_int(struct pmu *pmu)
5107{
5108 return 0;
5109}
5110
5111static void perf_pmu_start_txn(struct pmu *pmu)
5112{
5113 perf_pmu_disable(pmu);
5114}
5115
5116static int perf_pmu_commit_txn(struct pmu *pmu)
5117{
5118 perf_pmu_enable(pmu);
5119 return 0;
5120}
5121
5122static void perf_pmu_cancel_txn(struct pmu *pmu)
5123{
5124 perf_pmu_enable(pmu);
5125}
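/*
 * Usage sketch (illustrative): group scheduling wraps the per-event ->add()
 * calls in a transaction so a PMU that batches hardware writes can commit
 * or roll back the whole group at once, roughly:
 *
 *	pmu->start_txn(pmu);
 *	... ->add() each member of the group ...
 *	if (pmu->commit_txn(pmu)) {
 *		... undo the additions ...
 *		pmu->cancel_txn(pmu);
 *	}
 *
 * The stubs above implement this with nothing more than a
 * perf_pmu_disable()/perf_pmu_enable() bracket.
 */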
5126
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005127/*
5128 * Ensures all contexts with the same task_ctx_nr have the same
5129 * pmu_cpu_context too.
5130 */
5131static void *find_pmu_context(int ctxn)
5132{
5133 struct pmu *pmu;
5134
5135 if (ctxn < 0)
5136 return NULL;
5137
5138 list_for_each_entry(pmu, &pmus, entry) {
5139 if (pmu->task_ctx_nr == ctxn)
5140 return pmu->pmu_cpu_context;
5141 }
5142
5143 return NULL;
5144}
5145
5146static void free_pmu_context(void * __percpu cpu_context)
5147{
5148 struct pmu *pmu;
5149
5150 mutex_lock(&pmus_lock);
5151 /*
5152 * Like a real lame refcount.
5153 */
5154 list_for_each_entry(pmu, &pmus, entry) {
5155 if (pmu->pmu_cpu_context == cpu_context)
5156 goto out;
5157 }
5158
5159 free_percpu(cpu_context);
5160out:
5161 mutex_unlock(&pmus_lock);
5162}
5163
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005164int perf_pmu_register(struct pmu *pmu)
5165{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005166 int cpu, ret;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005167
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005168 mutex_lock(&pmus_lock);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005169 ret = -ENOMEM;
5170 pmu->pmu_disable_count = alloc_percpu(int);
5171 if (!pmu->pmu_disable_count)
5172 goto unlock;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005173
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005174 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5175 if (pmu->pmu_cpu_context)
5176 goto got_cpu_context;
5177
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005178 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5179 if (!pmu->pmu_cpu_context)
5180 goto free_pdc;
5181
5182 for_each_possible_cpu(cpu) {
5183 struct perf_cpu_context *cpuctx;
5184
5185 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Peter Zijlstraeb184472010-09-07 15:55:13 +02005186 __perf_event_init_context(&cpuctx->ctx);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005187 cpuctx->ctx.type = cpu_context;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005188 cpuctx->ctx.pmu = pmu;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02005189 cpuctx->jiffies_interval = 1;
5190 INIT_LIST_HEAD(&cpuctx->rotation_list);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005191 }
5192
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005193got_cpu_context:
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005194 if (!pmu->start_txn) {
5195 if (pmu->pmu_enable) {
5196 /*
5197 * If we have pmu_enable/pmu_disable calls, install
5198 * transaction stubs that use that to try and batch
5199 * hardware accesses.
5200 */
5201 pmu->start_txn = perf_pmu_start_txn;
5202 pmu->commit_txn = perf_pmu_commit_txn;
5203 pmu->cancel_txn = perf_pmu_cancel_txn;
5204 } else {
5205 pmu->start_txn = perf_pmu_nop_void;
5206 pmu->commit_txn = perf_pmu_nop_int;
5207 pmu->cancel_txn = perf_pmu_nop_void;
5208 }
5209 }
5210
5211 if (!pmu->pmu_enable) {
5212 pmu->pmu_enable = perf_pmu_nop_void;
5213 pmu->pmu_disable = perf_pmu_nop_void;
5214 }
5215
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005216 list_add_rcu(&pmu->entry, &pmus);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005217 ret = 0;
5218unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005219 mutex_unlock(&pmus_lock);
5220
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005221 return ret;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005222
5223free_pdc:
5224 free_percpu(pmu->pmu_disable_count);
5225 goto unlock;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005226}
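/*
 * Driver-side sketch (illustrative; my_* callbacks are hypothetical): a PMU
 * is a struct pmu filled with event_init/add/del/start/stop/read callbacks,
 * registered once at init time, mirroring the software pmus defined earlier
 * in this file:
 *
 *	static struct pmu my_pmu = {
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	perf_pmu_register(&my_pmu);
 */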
5227
5228void perf_pmu_unregister(struct pmu *pmu)
5229{
5230 mutex_lock(&pmus_lock);
5231 list_del_rcu(&pmu->entry);
5232 mutex_unlock(&pmus_lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005233
5234 /*
Peter Zijlstracde8e882010-09-13 11:06:55 +02005235 * We dereference the pmu list under both SRCU and regular RCU, so
5236 * synchronize against both of those.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005237 */
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005238 synchronize_srcu(&pmus_srcu);
Peter Zijlstracde8e882010-09-13 11:06:55 +02005239 synchronize_rcu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005240
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005241 free_percpu(pmu->pmu_disable_count);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005242 free_pmu_context(pmu->pmu_cpu_context);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005243}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005244
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005245struct pmu *perf_init_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005246{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005247 struct pmu *pmu = NULL;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005248 int idx;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005249
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005250 idx = srcu_read_lock(&pmus_srcu);
5251 list_for_each_entry_rcu(pmu, &pmus, entry) {
5252 int ret = pmu->event_init(event);
5253 if (!ret)
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005254 goto unlock;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005255
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005256 if (ret != -ENOENT) {
5257 pmu = ERR_PTR(ret);
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005258 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005259 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005260 }
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005261 pmu = ERR_PTR(-ENOENT);
5262unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005263 srcu_read_unlock(&pmus_srcu, idx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005264
5265 return pmu;
5266}
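/*
 * Note on the loop above: each pmu's ->event_init() returns -ENOENT when the
 * event is simply not of its type, which keeps the search going; any other
 * error (or success) ends it, so a pmu can still veto an event it does own.
 */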
5267
5268/*
5269 * Allocate and initialize an event structure
5270 */
5271static struct perf_event *
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005272perf_event_alloc(struct perf_event_attr *attr, int cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005273 struct task_struct *task,
5274 struct perf_event *group_leader,
5275 struct perf_event *parent_event,
5276 perf_overflow_handler_t overflow_handler)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005277{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005278 struct pmu *pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005279 struct perf_event *event;
5280 struct hw_perf_event *hwc;
5281 long err;
5282
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005283 event = kzalloc(sizeof(*event), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005284 if (!event)
5285 return ERR_PTR(-ENOMEM);
5286
5287 /*
5288 * Single events are their own group leaders, with an
5289 * empty sibling list:
5290 */
5291 if (!group_leader)
5292 group_leader = event;
5293
5294 mutex_init(&event->child_mutex);
5295 INIT_LIST_HEAD(&event->child_list);
5296
5297 INIT_LIST_HEAD(&event->group_entry);
5298 INIT_LIST_HEAD(&event->event_entry);
5299 INIT_LIST_HEAD(&event->sibling_list);
5300 init_waitqueue_head(&event->waitq);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08005301 init_irq_work(&event->pending, perf_pending_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005302
5303 mutex_init(&event->mmap_mutex);
5304
5305 event->cpu = cpu;
5306 event->attr = *attr;
5307 event->group_leader = group_leader;
5308 event->pmu = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005309 event->oncpu = -1;
5310
5311 event->parent = parent_event;
5312
5313 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5314 event->id = atomic64_inc_return(&perf_event_id);
5315
5316 event->state = PERF_EVENT_STATE_INACTIVE;
5317
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005318 if (task) {
5319 event->attach_state = PERF_ATTACH_TASK;
5320#ifdef CONFIG_HAVE_HW_BREAKPOINT
5321 /*
5322 * hw_breakpoint is a bit difficult here..
5323 */
5324 if (attr->type == PERF_TYPE_BREAKPOINT)
5325 event->hw.bp_target = task;
5326#endif
5327 }
5328
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005329 if (!overflow_handler && parent_event)
5330 overflow_handler = parent_event->overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005331
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005332 event->overflow_handler = overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005333
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005334 if (attr->disabled)
5335 event->state = PERF_EVENT_STATE_OFF;
5336
5337 pmu = NULL;
5338
5339 hwc = &event->hw;
5340 hwc->sample_period = attr->sample_period;
5341 if (attr->freq && attr->sample_freq)
5342 hwc->sample_period = 1;
5343 hwc->last_period = hwc->sample_period;
5344
Peter Zijlstrae7850592010-05-21 14:43:08 +02005345 local64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005346
5347 /*
5348 * we currently do not support PERF_FORMAT_GROUP on inherited events
5349 */
5350 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5351 goto done;
5352
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005353 pmu = perf_init_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005354
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005355done:
5356 err = 0;
5357 if (!pmu)
5358 err = -EINVAL;
5359 else if (IS_ERR(pmu))
5360 err = PTR_ERR(pmu);
5361
5362 if (err) {
5363 if (event->ns)
5364 put_pid_ns(event->ns);
5365 kfree(event);
5366 return ERR_PTR(err);
5367 }
5368
5369 event->pmu = pmu;
5370
5371 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02005372 if (event->attach_state & PERF_ATTACH_TASK)
5373 jump_label_inc(&perf_task_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01005374 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005375 atomic_inc(&nr_mmap_events);
5376 if (event->attr.comm)
5377 atomic_inc(&nr_comm_events);
5378 if (event->attr.task)
5379 atomic_inc(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02005380 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5381 err = get_callchain_buffers();
5382 if (err) {
5383 free_event(event);
5384 return ERR_PTR(err);
5385 }
5386 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005387 }
5388
5389 return event;
5390}
5391
5392static int perf_copy_attr(struct perf_event_attr __user *uattr,
5393 struct perf_event_attr *attr)
5394{
5395 u32 size;
5396 int ret;
5397
5398 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5399 return -EFAULT;
5400
5401 /*
5402 * zero the full structure, so that a short copy will be nice.
5403 */
5404 memset(attr, 0, sizeof(*attr));
5405
5406 ret = get_user(size, &uattr->size);
5407 if (ret)
5408 return ret;
5409
5410 if (size > PAGE_SIZE) /* silly large */
5411 goto err_size;
5412
5413 if (!size) /* abi compat */
5414 size = PERF_ATTR_SIZE_VER0;
5415
5416 if (size < PERF_ATTR_SIZE_VER0)
5417 goto err_size;
5418
5419 /*
5420 * If we're handed a bigger struct than we know of,
5421 * ensure all the unknown bits are 0 - i.e. new
5422 * user-space does not rely on any kernel feature
5423 * extensions we dont know about yet.
5424 */
5425 if (size > sizeof(*attr)) {
5426 unsigned char __user *addr;
5427 unsigned char __user *end;
5428 unsigned char val;
5429
5430 addr = (void __user *)uattr + sizeof(*attr);
5431 end = (void __user *)uattr + size;
5432
5433 for (; addr < end; addr++) {
5434 ret = get_user(val, addr);
5435 if (ret)
5436 return ret;
5437 if (val)
5438 goto err_size;
5439 }
5440 size = sizeof(*attr);
5441 }
5442
5443 ret = copy_from_user(attr, uattr, size);
5444 if (ret)
5445 return -EFAULT;
5446
5447 /*
5448 * If the type exists, the corresponding creation will verify
5449 * the attr->config.
5450 */
5451 if (attr->type >= PERF_TYPE_MAX)
5452 return -EINVAL;
5453
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05305454 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005455 return -EINVAL;
5456
5457 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5458 return -EINVAL;
5459
5460 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5461 return -EINVAL;
5462
5463out:
5464 return ret;
5465
5466err_size:
5467 put_user(sizeof(*attr), &uattr->size);
5468 ret = -E2BIG;
5469 goto out;
5470}
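/*
 * ABI note (descriptive): a short copy from an older binary is zero-padded
 * and treated as PERF_ATTR_SIZE_VER0; a larger struct from a newer binary is
 * accepted only if every byte beyond sizeof(*attr) is zero, otherwise -E2BIG
 * is returned with the kernel's sizeof(*attr) written back into uattr->size
 * so user-space can retry.
 */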
5471
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005472static int
5473perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005474{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005475 struct perf_buffer *buffer = NULL, *old_buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005476 int ret = -EINVAL;
5477
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005478 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005479 goto set;
5480
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005481 /* don't allow circular references */
5482 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005483 goto out;
5484
Peter Zijlstra0f139302010-05-20 14:35:15 +02005485 /*
5486 * Don't allow cross-cpu buffers
5487 */
5488 if (output_event->cpu != event->cpu)
5489 goto out;
5490
5491 /*
5492	 * If it's not a per-cpu buffer, it must be the same task.
5493 */
5494 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5495 goto out;
5496
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005497set:
5498 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005499 /* Can't redirect output if we've got an active mmap() */
5500 if (atomic_read(&event->mmap_count))
5501 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005502
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005503 if (output_event) {
5504 /* get the buffer we want to redirect to */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005505 buffer = perf_buffer_get(output_event);
5506 if (!buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005507 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005508 }
5509
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005510 old_buffer = event->buffer;
5511 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005512 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005513unlock:
5514 mutex_unlock(&event->mmap_mutex);
5515
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005516 if (old_buffer)
5517 perf_buffer_put(old_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005518out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005519 return ret;
5520}
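/*
 * User-space sketch (illustrative): output redirection is reached either via
 * the PERF_FLAG_FD_OUTPUT open flag or, after the fact, via
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
 *
 * (handled elsewhere in this file), after which the event's samples land in
 * target_fd's mmap()ed buffer, subject to the same-cpu / same-task checks
 * above.
 */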
5521
5522/**
5523 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5524 *
5525 * @attr_uptr: event_id type attributes for monitoring/sampling
5526 * @pid: target pid
5527 * @cpu: target cpu
5528 * @group_fd: group leader event fd
5529 */
5530SYSCALL_DEFINE5(perf_event_open,
5531 struct perf_event_attr __user *, attr_uptr,
5532 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5533{
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005534 struct perf_event *group_leader = NULL, *output_event = NULL;
5535 struct perf_event *event, *sibling;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005536 struct perf_event_attr attr;
5537 struct perf_event_context *ctx;
5538 struct file *event_file = NULL;
5539 struct file *group_file = NULL;
Matt Helsley38a81da2010-09-13 13:01:20 -07005540 struct task_struct *task = NULL;
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005541 struct pmu *pmu;
Al Viroea635c62010-05-26 17:40:29 -04005542 int event_fd;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005543 int move_group = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005544 int fput_needed = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005545 int err;
5546
5547 /* for future expandability... */
5548 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5549 return -EINVAL;
5550
5551 err = perf_copy_attr(attr_uptr, &attr);
5552 if (err)
5553 return err;
5554
5555 if (!attr.exclude_kernel) {
5556 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5557 return -EACCES;
5558 }
5559
5560 if (attr.freq) {
5561 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5562 return -EINVAL;
5563 }
5564
Al Viroea635c62010-05-26 17:40:29 -04005565 event_fd = get_unused_fd_flags(O_RDWR);
5566 if (event_fd < 0)
5567 return event_fd;
5568
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005569 if (group_fd != -1) {
5570 group_leader = perf_fget_light(group_fd, &fput_needed);
5571 if (IS_ERR(group_leader)) {
5572 err = PTR_ERR(group_leader);
Stephane Eraniand14b12d2010-09-17 11:28:47 +02005573 goto err_fd;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005574 }
5575 group_file = group_leader->filp;
5576 if (flags & PERF_FLAG_FD_OUTPUT)
5577 output_event = group_leader;
5578 if (flags & PERF_FLAG_FD_NO_GROUP)
5579 group_leader = NULL;
5580 }
5581
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02005582 if (pid != -1) {
5583 task = find_lively_task_by_vpid(pid);
5584 if (IS_ERR(task)) {
5585 err = PTR_ERR(task);
5586 goto err_group_fd;
5587 }
5588 }
5589
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005590 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
Stephane Eraniand14b12d2010-09-17 11:28:47 +02005591 if (IS_ERR(event)) {
5592 err = PTR_ERR(event);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02005593 goto err_task;
Stephane Eraniand14b12d2010-09-17 11:28:47 +02005594 }
5595
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005596 /*
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005597 * Special case software events and allow them to be part of
5598 * any hardware group.
5599 */
5600 pmu = event->pmu;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005601
5602 if (group_leader &&
5603 (is_software_event(event) != is_software_event(group_leader))) {
5604 if (is_software_event(event)) {
5605 /*
5606	 * If event and group_leader are not both software
5607	 * events, and event is, then the group leader is not.
5608 *
5609 * Allow the addition of software events to !software
5610 * groups, this is safe because software events never
5611 * fail to schedule.
5612 */
5613 pmu = group_leader->pmu;
5614 } else if (is_software_event(group_leader) &&
5615 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
5616 /*
5617 * In case the group is a pure software group, and we
5618 * try to add a hardware event, move the whole group to
5619 * the hardware context.
5620 */
5621 move_group = 1;
5622 }
5623 }
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005624
5625 /*
5626 * Get the target context (task or percpu):
5627 */
Matt Helsley38a81da2010-09-13 13:01:20 -07005628 ctx = find_get_context(pmu, task, cpu);
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005629 if (IS_ERR(ctx)) {
5630 err = PTR_ERR(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02005631 goto err_alloc;
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005632 }
5633
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005634 /*
5635 * Look up the group leader (we will attach this event to it):
5636 */
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005637 if (group_leader) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005638 err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005639
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005640 /*
5641 * Do not allow a recursive hierarchy (this new sibling
5642 * becoming part of another group-sibling):
5643 */
5644 if (group_leader->group_leader != group_leader)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005645 goto err_context;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005646 /*
5647 * Do not allow to attach to a group in a different
5648 * task or CPU context:
5649 */
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005650 if (move_group) {
5651 if (group_leader->ctx->type != ctx->type)
5652 goto err_context;
5653 } else {
5654 if (group_leader->ctx != ctx)
5655 goto err_context;
5656 }
5657
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005658 /*
5659 * Only a group leader can be exclusive or pinned
5660 */
5661 if (attr.exclusive || attr.pinned)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005662 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005663 }
5664
5665 if (output_event) {
5666 err = perf_event_set_output(event, output_event);
5667 if (err)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005668 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005669 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005670
Al Viroea635c62010-05-26 17:40:29 -04005671 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5672 if (IS_ERR(event_file)) {
5673 err = PTR_ERR(event_file);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005674 goto err_context;
Al Viroea635c62010-05-26 17:40:29 -04005675 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005676
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005677 if (move_group) {
5678 struct perf_event_context *gctx = group_leader->ctx;
5679
5680 mutex_lock(&gctx->mutex);
5681 perf_event_remove_from_context(group_leader);
5682 list_for_each_entry(sibling, &group_leader->sibling_list,
5683 group_entry) {
5684 perf_event_remove_from_context(sibling);
5685 put_ctx(gctx);
5686 }
5687 mutex_unlock(&gctx->mutex);
5688 put_ctx(gctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005689 }
5690
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005691 event->filp = event_file;
5692 WARN_ON_ONCE(ctx->parent_ctx);
5693 mutex_lock(&ctx->mutex);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005694
5695 if (move_group) {
5696 perf_install_in_context(ctx, group_leader, cpu);
5697 get_ctx(ctx);
5698 list_for_each_entry(sibling, &group_leader->sibling_list,
5699 group_entry) {
5700 perf_install_in_context(ctx, sibling, cpu);
5701 get_ctx(ctx);
5702 }
5703 }
5704
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005705 perf_install_in_context(ctx, event, cpu);
5706 ++ctx->generation;
5707 mutex_unlock(&ctx->mutex);
5708
5709 event->owner = current;
Peter Zijlstra88821352010-11-09 19:01:43 +01005710
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005711 mutex_lock(&current->perf_event_mutex);
5712 list_add_tail(&event->owner_entry, &current->perf_event_list);
5713 mutex_unlock(&current->perf_event_mutex);
5714
Peter Zijlstra8a495422010-05-27 15:47:49 +02005715 /*
5716 * Drop the reference on the group_event after placing the
5717 * new event on the sibling_list. This ensures destruction
5718 * of the group leader will find the pointer to itself in
5719 * perf_group_detach().
5720 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005721 fput_light(group_file, fput_needed);
Al Viroea635c62010-05-26 17:40:29 -04005722 fd_install(event_fd, event_file);
5723 return event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005724
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005725err_context:
Al Viroea635c62010-05-26 17:40:29 -04005726 put_ctx(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02005727err_alloc:
5728 free_event(event);
Peter Zijlstrae7d0bc02010-10-14 16:54:51 +02005729err_task:
5730 if (task)
5731 put_task_struct(task);
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005732err_group_fd:
5733 fput_light(group_file, fput_needed);
Al Viroea635c62010-05-26 17:40:29 -04005734err_fd:
5735 put_unused_fd(event_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005736 return err;
5737}
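/*
 * User-space usage sketch (illustrative, raw syscall): counting the task
 * clock of the calling thread on any CPU.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_SW_TASK_CLOCK,
 *		.disabled	= 1,
 *	};
 *	u64 count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	(run the workload)
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid = 0 means "this task", cpu = -1 means "any cpu", group_fd = -1 and
 * flags = 0 create a stand-alone event.
 */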
5738
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005739/**
5740 * perf_event_create_kernel_counter
5741 *
5742 * @attr: attributes of the counter to create
5743 * @cpu: cpu in which the counter is bound
Matt Helsley38a81da2010-09-13 13:01:20 -07005744 * @task: task to profile (NULL for percpu)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005745 */
5746struct perf_event *
5747perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
Matt Helsley38a81da2010-09-13 13:01:20 -07005748 struct task_struct *task,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005749 perf_overflow_handler_t overflow_handler)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005750{
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005751 struct perf_event_context *ctx;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005752 struct perf_event *event;
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005753 int err;
5754
5755 /*
5756 * Get the target context (task or percpu):
5757 */
5758
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005759 event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005760 if (IS_ERR(event)) {
5761 err = PTR_ERR(event);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005762 goto err;
5763 }
5764
Matt Helsley38a81da2010-09-13 13:01:20 -07005765 ctx = find_get_context(event->pmu, task, cpu);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005766 if (IS_ERR(ctx)) {
5767 err = PTR_ERR(ctx);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005768 goto err_free;
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005769 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005770
5771 event->filp = NULL;
5772 WARN_ON_ONCE(ctx->parent_ctx);
5773 mutex_lock(&ctx->mutex);
5774 perf_install_in_context(ctx, event, cpu);
5775 ++ctx->generation;
5776 mutex_unlock(&ctx->mutex);
5777
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005778 return event;
5779
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005780err_free:
5781 free_event(event);
5782err:
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005783 return ERR_PTR(err);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005784}
5785EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
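/*
 * In-kernel usage sketch (illustrative): callers such as the hw_breakpoint
 * layer create counters directly, e.g. a pinned per-cpu counter with an
 * overflow callback:
 *
 *	static struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *	};
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL, my_overflow);
 *
 * 'my_overflow' is a hypothetical perf_overflow_handler_t; passing a NULL
 * task makes the counter per-cpu, as documented above.
 */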
5786
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005787static void sync_child_event(struct perf_event *child_event,
5788 struct task_struct *child)
5789{
5790 struct perf_event *parent_event = child_event->parent;
5791 u64 child_val;
5792
5793 if (child_event->attr.inherit_stat)
5794 perf_event_read_event(child_event, child);
5795
Peter Zijlstrab5e58792010-05-21 14:43:12 +02005796 child_val = perf_event_count(child_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005797
5798 /*
5799 * Add back the child's count to the parent's count:
5800 */
Peter Zijlstraa6e6dea2010-05-21 14:27:58 +02005801 atomic64_add(child_val, &parent_event->child_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005802 atomic64_add(child_event->total_time_enabled,
5803 &parent_event->child_total_time_enabled);
5804 atomic64_add(child_event->total_time_running,
5805 &parent_event->child_total_time_running);
5806
5807 /*
5808 * Remove this event from the parent's list
5809 */
5810 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5811 mutex_lock(&parent_event->child_mutex);
5812 list_del_init(&child_event->child_list);
5813 mutex_unlock(&parent_event->child_mutex);
5814
5815 /*
5816 * Release the parent event, if this was the last
5817 * reference to it.
5818 */
5819 fput(parent_event->filp);
5820}
5821
5822static void
5823__perf_event_exit_task(struct perf_event *child_event,
5824 struct perf_event_context *child_ctx,
5825 struct task_struct *child)
5826{
5827 struct perf_event *parent_event;
5828
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005829 perf_event_remove_from_context(child_event);
5830
5831 parent_event = child_event->parent;
5832 /*
5833 * It can happen that parent exits first, and has events
5834 * that are still around due to the child reference. These
5835 * events need to be zapped - but otherwise linger.
5836 */
5837 if (parent_event) {
5838 sync_child_event(child_event, child);
5839 free_event(child_event);
5840 }
5841}
5842
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005843static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005844{
5845 struct perf_event *child_event, *tmp;
5846 struct perf_event_context *child_ctx;
5847 unsigned long flags;
5848
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005849 if (likely(!child->perf_event_ctxp[ctxn])) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005850 perf_event_task(child, NULL, 0);
5851 return;
5852 }
5853
5854 local_irq_save(flags);
5855 /*
5856 * We can't reschedule here because interrupts are disabled,
5857 * and either child is current or it is a task that can't be
5858 * scheduled, so we are now safe from rescheduling changing
5859 * our context.
5860 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005861 child_ctx = child->perf_event_ctxp[ctxn];
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02005862 task_ctx_sched_out(child_ctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005863
5864 /*
5865 * Take the context lock here so that if find_get_context is
5866 * reading child->perf_event_ctxp, we wait until it has
5867 * incremented the context's refcount before we do put_ctx below.
5868 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005869 raw_spin_lock(&child_ctx->lock);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005870 child->perf_event_ctxp[ctxn] = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005871 /*
5872 * If this context is a clone; unclone it so it can't get
5873 * swapped to another process while we're removing all
5874 * the events from it.
5875 */
5876 unclone_ctx(child_ctx);
Peter Zijlstra5e942bb2009-11-23 11:37:26 +01005877 update_context_time(child_ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005878 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005879
5880 /*
5881 * Report the task dead after unscheduling the events so that we
5882 * won't get any samples after PERF_RECORD_EXIT. We can however still
5883 * get a few PERF_RECORD_READ events.
5884 */
5885 perf_event_task(child, child_ctx, 0);
5886
5887 /*
5888 * We can recurse on the same lock type through:
5889 *
5890 * __perf_event_exit_task()
5891 * sync_child_event()
5892 * fput(parent_event->filp)
5893 * perf_release()
5894 * mutex_lock(&ctx->mutex)
5895 *
5896	 * But since it's the parent context it won't be the same instance.
5897 */
Peter Zijlstraa0507c82010-05-06 15:42:53 +02005898 mutex_lock(&child_ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005899
5900again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005901 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5902 group_entry)
5903 __perf_event_exit_task(child_event, child_ctx, child);
5904
5905 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005906 group_entry)
5907 __perf_event_exit_task(child_event, child_ctx, child);
5908
5909 /*
5910 * If the last event was a group event, it will have appended all
5911 * its siblings to the list, but we obtained 'tmp' before that which
5912 * will still point to the list head terminating the iteration.
5913 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005914 if (!list_empty(&child_ctx->pinned_groups) ||
5915 !list_empty(&child_ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005916 goto again;
5917
5918 mutex_unlock(&child_ctx->mutex);
5919
5920 put_ctx(child_ctx);
5921}
5922
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005923/*
5924 * When a child task exits, feed back event values to parent events.
5925 */
5926void perf_event_exit_task(struct task_struct *child)
5927{
Peter Zijlstra88821352010-11-09 19:01:43 +01005928 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005929 int ctxn;
5930
Peter Zijlstra88821352010-11-09 19:01:43 +01005931 mutex_lock(&child->perf_event_mutex);
5932 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
5933 owner_entry) {
5934 list_del_init(&event->owner_entry);
5935
5936 /*
5937 * Ensure the list deletion is visible before we clear
5938 * the owner, closes a race against perf_release() where
5939 * we need to serialize on the owner->perf_event_mutex.
5940 */
5941 smp_wmb();
5942 event->owner = NULL;
5943 }
5944 mutex_unlock(&child->perf_event_mutex);
5945
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005946 for_each_task_context_nr(ctxn)
5947 perf_event_exit_task_context(child, ctxn);
5948}
5949
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005950static void perf_free_event(struct perf_event *event,
5951 struct perf_event_context *ctx)
5952{
5953 struct perf_event *parent = event->parent;
5954
5955 if (WARN_ON_ONCE(!parent))
5956 return;
5957
5958 mutex_lock(&parent->child_mutex);
5959 list_del_init(&event->child_list);
5960 mutex_unlock(&parent->child_mutex);
5961
5962 fput(parent->filp);
5963
Peter Zijlstra8a495422010-05-27 15:47:49 +02005964 perf_group_detach(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005965 list_del_event(event, ctx);
5966 free_event(event);
5967}
5968
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005969/*
5970 * Free an unexposed, unused context, created by inheritance in
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005971 * perf_event_init_task() below; used by fork() in case of failure.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005972 */
5973void perf_event_free_task(struct task_struct *task)
5974{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005975 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005976 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005977 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005978
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005979 for_each_task_context_nr(ctxn) {
5980 ctx = task->perf_event_ctxp[ctxn];
5981 if (!ctx)
5982 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005983
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005984 mutex_lock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005985again:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005986 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
5987 group_entry)
5988 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005989
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005990 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5991 group_entry)
5992 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005993
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005994 if (!list_empty(&ctx->pinned_groups) ||
5995 !list_empty(&ctx->flexible_groups))
5996 goto again;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005997
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005998 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005999
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006000 put_ctx(ctx);
6001 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006002}
6003
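/*
 * Final sanity check run late in task teardown: all per-task contexts
 * must already have been torn down by perf_event_exit_task() or
 * perf_event_free_task() by this point, so any leftover context is a bug.
 */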
Peter Zijlstra4e231c72010-09-09 21:01:59 +02006004void perf_event_delayed_put(struct task_struct *task)
6005{
6006 int ctxn;
6007
6008 for_each_task_context_nr(ctxn)
6009 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6010}
6011
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006012/*
6013 * inherit an event from the parent task to the child task:
6014 */
6015static struct perf_event *
6016inherit_event(struct perf_event *parent_event,
6017 struct task_struct *parent,
6018 struct perf_event_context *parent_ctx,
6019 struct task_struct *child,
6020 struct perf_event *group_leader,
6021 struct perf_event_context *child_ctx)
6022{
6023 struct perf_event *child_event;
Peter Zijlstracee010e2010-09-10 12:51:54 +02006024 unsigned long flags;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006025
6026 /*
6027 * Instead of creating recursive hierarchies of events,
6028 * we link inherited events back to the original parent,
6029	 * which is sure to have a filp that we use as the reference
6030 * count:
6031 */
6032 if (parent_event->parent)
6033 parent_event = parent_event->parent;
6034
6035 child_event = perf_event_alloc(&parent_event->attr,
6036 parent_event->cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02006037 child,
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006038 group_leader, parent_event,
6039 NULL);
6040 if (IS_ERR(child_event))
6041 return child_event;
6042 get_ctx(child_ctx);
6043
6044 /*
6045 * Make the child state follow the state of the parent event,
6046 * not its attr.disabled bit. We hold the parent's mutex,
6047 * so we won't race with perf_event_{en, dis}able_family.
6048 */
6049 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6050 child_event->state = PERF_EVENT_STATE_INACTIVE;
6051 else
6052 child_event->state = PERF_EVENT_STATE_OFF;
6053
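	/*
	 * For freq-based events, start the child at the parent's
	 * current (frequency-adjusted) sample period rather than the
	 * value in attr.sample_period.
	 */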
6054 if (parent_event->attr.freq) {
6055 u64 sample_period = parent_event->hw.sample_period;
6056 struct hw_perf_event *hwc = &child_event->hw;
6057
6058 hwc->sample_period = sample_period;
6059 hwc->last_period = sample_period;
6060
6061 local64_set(&hwc->period_left, sample_period);
6062 }
6063
6064 child_event->ctx = child_ctx;
6065 child_event->overflow_handler = parent_event->overflow_handler;
6066
6067 /*
6068 * Link it up in the child's context:
6069 */
Peter Zijlstracee010e2010-09-10 12:51:54 +02006070 raw_spin_lock_irqsave(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006071 add_event_to_ctx(child_event, child_ctx);
Peter Zijlstracee010e2010-09-10 12:51:54 +02006072 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006073
6074 /*
6075 * Get a reference to the parent filp - we will fput it
6076 * when the child event exits. This is safe to do because
6077 * we are in the parent and we know that the filp still
6078 * exists and has a nonzero count:
6079 */
6080 atomic_long_inc(&parent_event->filp->f_count);
6081
6082 /*
6083 * Link this into the parent event's child list
6084 */
6085 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6086 mutex_lock(&parent_event->child_mutex);
6087 list_add_tail(&child_event->child_list, &parent_event->child_list);
6088 mutex_unlock(&parent_event->child_mutex);
6089
6090 return child_event;
6091}
6092
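/*
 * Inherit a whole group: clone the group leader first, then each
 * sibling into the new leader's group. Any failure is propagated to
 * the caller.
 */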
6093static int inherit_group(struct perf_event *parent_event,
6094 struct task_struct *parent,
6095 struct perf_event_context *parent_ctx,
6096 struct task_struct *child,
6097 struct perf_event_context *child_ctx)
6098{
6099 struct perf_event *leader;
6100 struct perf_event *sub;
6101 struct perf_event *child_ctr;
6102
6103 leader = inherit_event(parent_event, parent, parent_ctx,
6104 child, NULL, child_ctx);
6105 if (IS_ERR(leader))
6106 return PTR_ERR(leader);
6107 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6108 child_ctr = inherit_event(sub, parent, parent_ctx,
6109 child, leader, child_ctx);
6110 if (IS_ERR(child_ctr))
6111 return PTR_ERR(child_ctr);
6112 }
6113 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006114}
6115
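/*
 * Inherit one group from the parent into the child's context for PMU
 * context number 'ctxn', allocating that child context on first use.
 * Clears *inherited_all when the event is not marked inheritable or
 * when inheritance fails, so the caller knows the child context cannot
 * be treated as a full clone of the parent.
 */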
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006116static int
6117inherit_task_group(struct perf_event *event, struct task_struct *parent,
6118 struct perf_event_context *parent_ctx,
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006119 struct task_struct *child, int ctxn,
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006120 int *inherited_all)
6121{
6122 int ret;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006123 struct perf_event_context *child_ctx;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006124
6125 if (!event->attr.inherit) {
6126 *inherited_all = 0;
6127 return 0;
6128 }
6129
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006130 child_ctx = child->perf_event_ctxp[ctxn];
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006131 if (!child_ctx) {
6132 /*
6133 * This is executed from the parent task context, so
6134 * inherit events that have been marked for cloning.
6135 * First allocate and initialize a context for the
6136 * child.
6137 */
6138
Peter Zijlstraeb184472010-09-07 15:55:13 +02006139 child_ctx = alloc_perf_context(event->pmu, child);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006140 if (!child_ctx)
6141 return -ENOMEM;
6142
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006143 child->perf_event_ctxp[ctxn] = child_ctx;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006144 }
6145
6146 ret = inherit_group(event, parent, parent_ctx,
6147 child, child_ctx);
6148
6149 if (ret)
6150 *inherited_all = 0;
6151
6152 return ret;
6153}
6154
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006155/*
6156 * Initialize the perf_event context in task_struct
6157 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006158int perf_event_init_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006159{
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006160 struct perf_event_context *child_ctx, *parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006161 struct perf_event_context *cloned_ctx;
6162 struct perf_event *event;
6163 struct task_struct *parent = current;
6164 int inherited_all = 1;
6165 int ret = 0;
6166
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006167 child->perf_event_ctxp[ctxn] = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006168
6169 mutex_init(&child->perf_event_mutex);
6170 INIT_LIST_HEAD(&child->perf_event_list);
6171
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006172 if (likely(!parent->perf_event_ctxp[ctxn]))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006173 return 0;
6174
6175 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006176 * If the parent's context is a clone, pin it so it won't get
6177 * swapped under us.
6178 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006179 parent_ctx = perf_pin_task_context(parent, ctxn);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006180
6181 /*
6182 * No need to check if parent_ctx != NULL here; since we saw
6183 * it non-NULL earlier, the only reason for it to become NULL
6184 * is if we exit, and since we're currently in the middle of
6185 * a fork we can't be exiting at the same time.
6186 */
6187
6188 /*
6189 * Lock the parent list. No need to lock the child - not PID
6190 * hashed yet and not running, so nobody can access it.
6191 */
6192 mutex_lock(&parent_ctx->mutex);
6193
6194 /*
6195	 * We don't have to disable NMIs - we are only looking at
6196 * the list, not manipulating it:
6197 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006198 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006199 ret = inherit_task_group(event, parent, parent_ctx,
6200 child, ctxn, &inherited_all);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006201 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006202 break;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006203 }
6204
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006205 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006206 ret = inherit_task_group(event, parent, parent_ctx,
6207 child, ctxn, &inherited_all);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006208 if (ret)
6209 break;
6210 }
6211
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006212 child_ctx = child->perf_event_ctxp[ctxn];
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006213
Peter Zijlstra05cbaa22009-12-30 16:00:35 +01006214 if (child_ctx && inherited_all) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006215 /*
6216 * Mark the child context as a clone of the parent
6217 * context, or of whatever the parent is a clone of.
6218 * Note that if the parent is a clone, it could get
6219 * uncloned at any point, but that doesn't matter
6220 * because the list of events and the generation
6221 * count can't have changed since we took the mutex.
6222 */
6223 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
6224 if (cloned_ctx) {
6225 child_ctx->parent_ctx = cloned_ctx;
6226 child_ctx->parent_gen = parent_ctx->parent_gen;
6227 } else {
6228 child_ctx->parent_ctx = parent_ctx;
6229 child_ctx->parent_gen = parent_ctx->generation;
6230 }
6231 get_ctx(child_ctx->parent_ctx);
6232 }
6233
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006234 mutex_unlock(&parent_ctx->mutex);
6235
6236 perf_unpin_context(parent_ctx);
6237
6238 return ret;
6239}
6240
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006241/*
6242 * Initialize all the perf_event contexts in task_struct
6243 */
6244int perf_event_init_task(struct task_struct *child)
6245{
6246 int ctxn, ret;
6247
6248 for_each_task_context_nr(ctxn) {
6249 ret = perf_event_init_context(child, ctxn);
6250 if (ret)
6251 return ret;
6252 }
6253
6254 return 0;
6255}
6256
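/*
 * Boot-time setup of per-cpu state: initialize the swevent hashtable
 * mutex and the context rotation list for every possible CPU.
 */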
Paul Mackerras220b1402010-03-10 20:45:52 +11006257static void __init perf_event_init_all_cpus(void)
6258{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006259 struct swevent_htable *swhash;
Paul Mackerras220b1402010-03-10 20:45:52 +11006260 int cpu;
Paul Mackerras220b1402010-03-10 20:45:52 +11006261
6262 for_each_possible_cpu(cpu) {
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006263 swhash = &per_cpu(swevent_htable, cpu);
6264 mutex_init(&swhash->hlist_mutex);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02006265 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
Paul Mackerras220b1402010-03-10 20:45:52 +11006266 }
6267}
6268
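/*
 * CPU bring-up: if software events already have users, allocate a
 * fresh swevent hashtable for the incoming CPU.
 */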
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006269static void __cpuinit perf_event_init_cpu(int cpu)
6270{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006271 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006272
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006273 mutex_lock(&swhash->hlist_mutex);
6274 if (swhash->hlist_refcount > 0) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02006275 struct swevent_hlist *hlist;
6276
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006277 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6278 WARN_ON(!hlist);
6279 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02006280 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006281 mutex_unlock(&swhash->hlist_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006282}
6283
6284#ifdef CONFIG_HOTPLUG_CPU
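/*
 * Take this PMU's per-cpu context off the rotation list so periodic
 * event rotation no longer touches it; interrupts must be disabled,
 * which the WARN_ON below checks.
 */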
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02006285static void perf_pmu_rotate_stop(struct pmu *pmu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006286{
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02006287 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6288
6289 WARN_ON(!irqs_disabled());
6290
6291 list_del_init(&cpuctx->rotation_list);
6292}
6293
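/*
 * Runs on the CPU being offlined, via smp_call_function_single():
 * stop rotation for this context and remove every remaining event.
 */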
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006294static void __perf_event_exit_context(void *__info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006295{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006296 struct perf_event_context *ctx = __info;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006297 struct perf_event *event, *tmp;
6298
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006299 perf_pmu_rotate_stop(ctx->pmu);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02006300
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006301 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6302 __perf_event_remove_from_context(event);
6303 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006304 __perf_event_remove_from_context(event);
6305}
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006306
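/*
 * Walk all registered PMUs under SRCU and, for each one, empty its
 * per-cpu context on the CPU that is going away.
 */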
6307static void perf_event_exit_cpu_context(int cpu)
6308{
6309 struct perf_event_context *ctx;
6310 struct pmu *pmu;
6311 int idx;
6312
6313 idx = srcu_read_lock(&pmus_srcu);
6314 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra917bdd12010-09-17 11:28:49 +02006315 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006316
6317 mutex_lock(&ctx->mutex);
6318 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6319 mutex_unlock(&ctx->mutex);
6320 }
6321 srcu_read_unlock(&pmus_srcu, idx);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006322}
6323
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006324static void perf_event_exit_cpu(int cpu)
6325{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006326 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006327
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006328 mutex_lock(&swhash->hlist_mutex);
6329 swevent_hlist_release(swhash);
6330 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02006331
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006332 perf_event_exit_cpu_context(cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006333}
6334#else
6335static inline void perf_event_exit_cpu(int cpu) { }
6336#endif
6337
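/*
 * Hotplug notifier: set up per-cpu state when a CPU comes up (or a
 * failed offline is rolled back), and tear it down when a CPU goes
 * down (or a failed online is cancelled).
 */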
6338static int __cpuinit
6339perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6340{
6341 unsigned int cpu = (long)hcpu;
6342
Peter Zijlstra5e116372010-06-11 13:35:08 +02006343 switch (action & ~CPU_TASKS_FROZEN) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006344
6345 case CPU_UP_PREPARE:
Peter Zijlstra5e116372010-06-11 13:35:08 +02006346 case CPU_DOWN_FAILED:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006347 perf_event_init_cpu(cpu);
6348 break;
6349
Peter Zijlstra5e116372010-06-11 13:35:08 +02006350 case CPU_UP_CANCELED:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006351 case CPU_DOWN_PREPARE:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006352 perf_event_exit_cpu(cpu);
6353 break;
6354
6355 default:
6356 break;
6357 }
6358
6359 return NOTIFY_OK;
6360}
6361
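/*
 * Boot-time initialization: set up per-cpu state, register the
 * built-in software PMUs (software events, cpu-clock, task-clock),
 * hook up tracepoint support and the CPU-hotplug notifier, and
 * initialize hardware breakpoint support.
 */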
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006362void __init perf_event_init(void)
6363{
Jason Wessel3c502e72010-11-04 17:33:01 -05006364 int ret;
6365
Paul Mackerras220b1402010-03-10 20:45:52 +11006366 perf_event_init_all_cpus();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02006367 init_srcu_struct(&pmus_srcu);
6368 perf_pmu_register(&perf_swevent);
6369 perf_pmu_register(&perf_cpu_clock);
6370 perf_pmu_register(&perf_task_clock);
6371 perf_tp_register();
6372 perf_cpu_notifier(perf_cpu_notify);
Jason Wessel3c502e72010-11-04 17:33:01 -05006373
6374 ret = init_hw_breakpoint();
6375 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006376}