/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>

struct remote_function_call {
        struct task_struct *p;
        int (*func)(void *info);
        void *info;
        int ret;
};

static void remote_function(void *data)
{
        struct remote_function_call *tfc = data;
        struct task_struct *p = tfc->p;

        if (p) {
                tfc->ret = -EAGAIN;
                if (task_cpu(p) != smp_processor_id() || !task_curr(p))
                        return;
        }

        tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
        struct remote_function_call data = {
                .p = p,
                .func = func,
                .info = info,
                .ret = -ESRCH, /* No such (running) process */
        };

        if (task_curr(p))
                smp_call_function_single(task_cpu(p), remote_function, &data, 1);

        return data.ret;
}

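/*
 * Illustrative sketch, not part of the original file: how a caller might
 * drive task_function_call(). The bare retry loop below is hypothetical;
 * the real callers in this file (e.g. perf_remove_from_context()) pair the
 * -EAGAIN/-ESRCH results with a recheck under ctx->lock instead of
 * spinning blindly.
 *
 *	static int touch_nothing(void *info)
 *	{
 *		return 0;	// runs on whichever CPU the task is on
 *	}
 *
 *	// Retry while the task migrates between CPUs underneath us:
 *	while (task_function_call(task, touch_nothing, NULL) == -EAGAIN)
 *		cpu_relax();
 */
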
/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	the cpu on which to run the function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
        struct remote_function_call data = {
                .p = NULL,
                .func = func,
                .info = info,
                .ret = -ENXIO, /* No such CPU */
        };

        smp_call_function_single(cpu, remote_function, &data, 1);

        return data.ret;
}

enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
                             enum event_type_t event_type);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
        return "pmu";
}

static inline u64 perf_clock(void)
{
        return local_clock();
}

void perf_pmu_disable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
        if (!(*count)++)
                pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
        if (!--(*count))
                pmu->pmu_enable(pmu);
}

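/*
 * Illustrative sketch, not part of the original file: perf_pmu_disable()
 * and perf_pmu_enable() nest via the per-cpu pmu_disable_count, so only
 * the outermost pair actually touches the hardware:
 *
 *	perf_pmu_disable(pmu);	// count 0 -> 1: calls pmu->pmu_disable()
 *	perf_pmu_disable(pmu);	// count 1 -> 2: no hardware access
 *	...
 *	perf_pmu_enable(pmu);	// count 2 -> 1: no hardware access
 *	perf_pmu_enable(pmu);	// count 1 -> 0: calls pmu->pmu_enable()
 *
 * Calls must therefore be strictly balanced on the same CPU.
 */
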
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct list_head *head = &__get_cpu_var(rotation_list);

        WARN_ON(!irqs_disabled());

        if (list_empty(&cpuctx->rotation_list))
                list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
        WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
        struct perf_event_context *ctx;

        ctx = container_of(head, struct perf_event_context, rcu_head);
        kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
        if (atomic_dec_and_test(&ctx->refcount)) {
                if (ctx->parent_ctx)
                        put_ctx(ctx->parent_ctx);
                if (ctx->task)
                        put_task_struct(ctx->task);
                call_rcu(&ctx->rcu_head, free_ctx);
        }
}

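/*
 * Illustrative sketch, not part of the original file: the lifetime rules
 * that get_ctx()/put_ctx() implement. A context is freed via RCU so that
 * lockless readers holding rcu_read_lock() (see perf_lock_task_context()
 * below) never see it disappear underneath them:
 *
 *	rcu_read_lock();
 *	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 *	if (ctx && atomic_inc_not_zero(&ctx->refcount)) {
 *		// safe: the reference was taken before the last put_ctx()
 *		...
 *		put_ctx(ctx);	// may schedule free_ctx() via call_rcu()
 *	}
 *	rcu_read_unlock();
 */
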
static void unclone_ctx(struct perf_event_context *ctx)
{
        if (ctx->parent_ctx) {
                put_ctx(ctx->parent_ctx);
                ctx->parent_ctx = NULL;
        }
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
        /*
         * only top level events have the pid namespace they were created in
         */
        if (event->parent)
                event = event->parent;

        return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
        /*
         * only top level events have the pid namespace they were created in
         */
        if (event->parent)
                event = event->parent;

        return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
        u64 id = event->id;

        if (event->parent)
                id = event->parent->id;

        return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
        struct perf_event_context *ctx;

        rcu_read_lock();
retry:
        ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
                 * get swapped for another underneath us by
                 * perf_event_task_sched_out, though the
                 * rcu_read_lock() protects us from any context
                 * getting freed. Lock the context and check if it
                 * got swapped before we could get the lock, and retry
                 * if so. If we locked the right context, then it
                 * can't get swapped on us any more.
                 */
                raw_spin_lock_irqsave(&ctx->lock, *flags);
                if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }

                if (!atomic_inc_not_zero(&ctx->refcount)) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        ctx = NULL;
                }
        }
        rcu_read_unlock();
        return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
        struct perf_event_context *ctx;
        unsigned long flags;

        ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
        return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ctx->lock, flags);
        --ctx->pin_count;
        raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
        u64 now = perf_clock();

        ctx->time += now - ctx->timestamp;
        ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        u64 run_end;

        if (event->state < PERF_EVENT_STATE_INACTIVE ||
            event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
                return;

        if (ctx->is_active)
                run_end = perf_event_time(event);
        else
                run_end = event->tstamp_stopped;

        event->total_time_enabled = run_end - event->tstamp_enabled;

        if (event->state == PERF_EVENT_STATE_INACTIVE)
                run_end = event->tstamp_stopped;
        else
                run_end = perf_event_time(event);

        event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
        struct perf_event *event;

        update_event_times(leader);
        list_for_each_entry(event, &leader->sibling_list, group_entry)
                update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
        if (event->attr.pinned)
                return &ctx->pinned_groups;
        else
                return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
        event->attach_state |= PERF_ATTACH_CONTEXT;

        /*
         * If we're a stand-alone event or group leader, we go on the context
         * list; group events are kept attached to the group so that
         * perf_group_detach can, at all times, locate all siblings.
         */
        if (event->group_leader == event) {
                struct list_head *list;

                if (is_software_event(event))
                        event->group_flags |= PERF_GROUP_SOFTWARE;

                list = ctx_group_list(event, ctx);
                list_add_tail(&event->group_entry, list);
        }

        list_add_rcu(&event->event_entry, &ctx->event_list);
        if (!ctx->nr_events)
                perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
        int entry = sizeof(u64); /* value */
        int size = 0;
        int nr = 1;

        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_ID)
                entry += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_GROUP) {
                nr += event->group_leader->nr_siblings;
                size += sizeof(u64);
        }

        size += entry * nr;
        event->read_size = size;
}

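/*
 * Illustrative sketch, not part of the original file: a worked example of
 * the read_size computation above. For a group leader with two siblings and
 *
 *	read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 *		      PERF_FORMAT_TOTAL_TIME_ENABLED,
 *
 * we get nr = 3 (leader + 2 siblings), entry = 16 (value + id), and
 * size = 8 (time_enabled) + 8 (the nr field added for PERF_FORMAT_GROUP),
 * so read_size = 16 + 3 * 16 = 64 bytes, matching the layout userspace
 * sees from read():
 *
 *	struct {
 *		u64 nr;			// 3
 *		u64 time_enabled;
 *		struct { u64 value, id; } cnt[3];
 *	};
 */
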
static void perf_event__header_size(struct perf_event *event)
{
        struct perf_sample_data *data;
        u64 sample_type = event->attr.sample_type;
        u16 size = 0;

        perf_event__read_size(event);

        if (sample_type & PERF_SAMPLE_IP)
                size += sizeof(data->ip);

        if (sample_type & PERF_SAMPLE_ADDR)
                size += sizeof(data->addr);

        if (sample_type & PERF_SAMPLE_PERIOD)
                size += sizeof(data->period);

        if (sample_type & PERF_SAMPLE_READ)
                size += event->read_size;

        event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
        struct perf_sample_data *data;
        u64 sample_type = event->attr.sample_type;
        u16 size = 0;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid_entry);

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu_entry);

        event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
        struct perf_event *group_leader = event->group_leader, *pos;

        /*
         * We can have double attach due to group movement in perf_event_open.
         */
        if (event->attach_state & PERF_ATTACH_GROUP)
                return;

        event->attach_state |= PERF_ATTACH_GROUP;

        if (group_leader == event)
                return;

        if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
                        !is_software_event(event))
                group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

        list_add_tail(&event->group_entry, &group_leader->sibling_list);
        group_leader->nr_siblings++;

        perf_event__header_size(group_leader);

        list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
                perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
        if (!(event->attach_state & PERF_ATTACH_CONTEXT))
                return;

        event->attach_state &= ~PERF_ATTACH_CONTEXT;

        ctx->nr_events--;
        if (event->attr.inherit_stat)
                ctx->nr_stat--;

        list_del_rcu(&event->event_entry);

        if (event->group_leader == event)
                list_del_init(&event->group_entry);

        update_group_times(event);

        /*
         * If the event was in error state, then keep it
         * that way; otherwise bogus counts will be
         * returned on read(). The only way to get out
         * of error state is by explicit re-enabling
         * of the event.
         */
        if (event->state > PERF_EVENT_STATE_OFF)
                event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
        struct perf_event *sibling, *tmp;
        struct list_head *list = NULL;

        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
        if (!(event->attach_state & PERF_ATTACH_GROUP))
                return;

        event->attach_state &= ~PERF_ATTACH_GROUP;

        /*
         * If this is a sibling, remove it from its group.
         */
        if (event->group_leader != event) {
                list_del_init(&event->group_entry);
                event->group_leader->nr_siblings--;
                goto out;
        }

        if (!list_empty(&event->group_entry))
                list = &event->group_entry;

        /*
         * If this was a group event with sibling events then
         * upgrade the siblings to singleton events by adding them
         * to whatever list we are on.
         */
        list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
                if (list)
                        list_move_tail(&sibling->group_entry, list);
                sibling->group_leader = sibling;

                /* Inherit group flags from the previous leader */
                sibling->group_flags = event->group_flags;
        }

out:
        perf_event__header_size(event->group_leader);

        list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
                perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
        return event->cpu == -1 || event->cpu == smp_processor_id();
}

static void
event_sched_out(struct perf_event *event,
                struct perf_cpu_context *cpuctx,
                struct perf_event_context *ctx)
{
        u64 tstamp = perf_event_time(event);
        u64 delta;
        /*
         * An event which could not be activated because of
         * a filter mismatch still needs to have its timings
         * maintained, otherwise bogus information is returned
         * via read() for time_enabled, time_running:
         */
        if (event->state == PERF_EVENT_STATE_INACTIVE
            && !event_filter_match(event)) {
                delta = ctx->time - event->tstamp_stopped;
                event->tstamp_running += delta;
                event->tstamp_stopped = tstamp;
        }

        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;

        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
        event->tstamp_stopped = tstamp;
        event->pmu->del(event, 0);
        event->oncpu = -1;

        if (!is_software_event(event))
                cpuctx->active_oncpu--;
        ctx->nr_active--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
                struct perf_cpu_context *cpuctx,
                struct perf_event_context *ctx)
{
        struct perf_event *event;
        int state = group_event->state;

        event_sched_out(group_event, cpuctx, ctx);

        /*
         * Schedule out siblings (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);

        if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
        return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

        raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
        list_del_event(event, ctx);
        raw_spin_unlock(&ctx->lock);

        return 0;
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;

        lockdep_assert_held(&ctx->mutex);

        if (!task) {
                /*
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
                cpu_function_call(event->cpu, __perf_remove_from_context, event);
                return;
        }

retry:
        if (!task_function_call(task, __perf_remove_from_context, event))
                return;

        raw_spin_lock_irq(&ctx->lock);
        /*
         * If we failed to find a running task, but find the context active
         * now that we've acquired the ctx->lock, retry.
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * Since the task isn't running, it's safe to remove the event;
         * holding the ctx->lock ensures the task won't get scheduled in.
         */
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

        /*
         * If this is a per-task event, we need to check whether this
         * event's task is the current task on this cpu.
         *
         * Can trigger due to concurrent perf_event_context_sched_out()
         * flipping contexts around.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return -EINVAL;

        raw_spin_lock(&ctx->lock);

        /*
         * If the event is on, turn it off.
         * If it is in error state, leave it in error state.
         */
        if (event->state >= PERF_EVENT_STATE_INACTIVE) {
                update_context_time(ctx);
                update_group_times(event);
                if (event == event->group_leader)
                        group_sched_out(event, cpuctx, ctx);
                else
                        event_sched_out(event, cpuctx, ctx);
                event->state = PERF_EVENT_STATE_OFF;
        }

        raw_spin_unlock(&ctx->lock);

        return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Disable the event on the cpu that it's on
                 */
                cpu_function_call(event->cpu, __perf_event_disable, event);
                return;
        }

retry:
        if (!task_function_call(task, __perf_event_disable, event))
                return;

        raw_spin_lock_irq(&ctx->lock);
        /*
         * If the event is still active, we need to retry the cross-call.
         */
        if (event->state == PERF_EVENT_STATE_ACTIVE) {
                raw_spin_unlock_irq(&ctx->lock);
                /*
                 * Reload the task pointer, it might have been changed by
                 * a concurrent perf_event_context_sched_out().
                 */
                task = ctx->task;
                goto retry;
        }

        /*
         * Since we have the lock this context can't be scheduled
         * in, so we can change the state safely.
         */
        if (event->state == PERF_EVENT_STATE_INACTIVE) {
                update_group_times(event);
                event->state = PERF_EVENT_STATE_OFF;
        }
        raw_spin_unlock_irq(&ctx->lock);
}

static int
event_sched_in(struct perf_event *event,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
{
        u64 tstamp = perf_event_time(event);

        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;

        event->state = PERF_EVENT_STATE_ACTIVE;
        event->oncpu = smp_processor_id();
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
        smp_wmb();

        if (event->pmu->add(event, PERF_EF_START)) {
                event->state = PERF_EVENT_STATE_INACTIVE;
                event->oncpu = -1;
                return -EAGAIN;
        }

        event->tstamp_running += tstamp - event->tstamp_stopped;

        event->shadow_ctx_time = tstamp - ctx->timestamp;

        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;

        if (event->attr.exclusive)
                cpuctx->exclusive = 1;

        return 0;
}

static int
group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
{
        struct perf_event *event, *partial_group = NULL;
        struct pmu *pmu = group_event->pmu;
        u64 now = ctx->time;
        bool simulate = false;

        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;

        pmu->start_txn(pmu);

        if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
                return -EAGAIN;
        }

        /*
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
                }
        }

        if (!pmu->commit_txn(pmu))
                return 0;

group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
         * The events up to the failed event are scheduled out normally;
         * tstamp_stopped will be updated.
         *
         * The failed event and the remaining siblings need to have
         * their timings updated as if they had gone through event_sched_in()
         * and event_sched_out(). This is required to get consistent timings
         * across the group. This also takes care of the case where the group
         * could never be scheduled, by ensuring tstamp_stopped is set to mark
         * the time the event was actually stopped, such that the time delta
         * calculation in update_event_times() is correct.
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
                        simulate = true;

                if (simulate) {
                        event->tstamp_running += now - event->tstamp_stopped;
                        event->tstamp_stopped = now;
                } else {
                        event_sched_out(event, cpuctx, ctx);
                }
        }
        event_sched_out(group_event, cpuctx, ctx);

        pmu->cancel_txn(pmu);

        return -EAGAIN;
}

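/*
 * Illustrative sketch, not part of the original file: the PMU transaction
 * protocol that group_sched_in() drives. A group is committed atomically;
 * either every member fits on the hardware or none does:
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		if (event_sched_in(...))	// pmu->add() may defer real
 *			goto fail;		//   scheduling to the commit
 *	if (!pmu->commit_txn(pmu))		// returns 0 on success
 *		return 0;
 * fail:
 *	undo the partial group;
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 */
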
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
                           struct perf_cpu_context *cpuctx,
                           int can_add_hw)
{
        /*
         * Groups consisting entirely of software events can always go on.
         */
        if (event->group_flags & PERF_GROUP_SOFTWARE)
                return 1;
        /*
         * If an exclusive group is already on, no other hardware
         * events can go on.
         */
        if (cpuctx->exclusive)
                return 0;
        /*
         * If this group is exclusive and there are already
         * events on the CPU, it can't go on.
         */
        if (event->attr.exclusive && cpuctx->active_oncpu)
                return 0;
        /*
         * Otherwise, try to add it if all previous groups were able
         * to go on.
         */
        return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
                             struct perf_event_context *ctx)
{
        u64 tstamp = perf_event_time(event);

        list_add_event(event, ctx);
        perf_group_attach(event);
        event->tstamp_enabled = tstamp;
        event->tstamp_running = tstamp;
        event->tstamp_stopped = tstamp;
}

static void perf_event_context_sched_in(struct perf_event_context *ctx);

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;

        /*
         * In case we're installing a new context to an already running task,
         * this could also happen before perf_event_task_sched_in() on
         * architectures which do context switches with IRQs enabled.
         */
        if (ctx->task && !cpuctx->task_ctx)
                perf_event_context_sched_in(ctx);

        raw_spin_lock(&ctx->lock);
        ctx->is_active = 1;
        update_context_time(ctx);

        add_event_to_ctx(event, ctx);

        if (!event_filter_match(event))
                goto unlock;

        /*
         * Don't put the event on if it is disabled or if
         * it is in a group and the group isn't on.
         */
        if (event->state != PERF_EVENT_STATE_INACTIVE ||
            (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
                goto unlock;

        /*
         * An exclusive event can't go on if there are already active
         * hardware events, and no hardware event can go on if there
         * is already an exclusive event on.
         */
        if (!group_can_go_on(event, cpuctx, 1))
                err = -EEXIST;
        else
                err = event_sched_in(event, cpuctx, ctx);

        if (err) {
                /*
                 * This event couldn't go on. If it is in a group
                 * then we have to pull the whole group off.
                 * If the event group is pinned then put it in error state.
                 */
                if (leader != event)
                        group_sched_out(leader, cpuctx, ctx);
                if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_EVENT_STATE_ERROR;
                }
        }

unlock:
        raw_spin_unlock(&ctx->lock);

        return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
                        struct perf_event *event,
                        int cpu)
{
        struct task_struct *task = ctx->task;

        lockdep_assert_held(&ctx->mutex);

        event->ctx = ctx;

        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
                 * the install is always successful.
                 */
                cpu_function_call(cpu, __perf_install_in_context, event);
                return;
        }

retry:
        if (!task_function_call(task, __perf_install_in_context, event))
                return;

        raw_spin_lock_irq(&ctx->lock);
        /*
         * If we failed to find a running task, but find the context active
         * now that we've acquired the ctx->lock, retry.
         */
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * Since the task isn't running, it's safe to add the event; holding
         * the ctx->lock ensures the task won't get scheduled in.
         */
        add_event_to_ctx(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
                                      struct perf_event_context *ctx)
{
        struct perf_event *sub;
        u64 tstamp = perf_event_time(event);

        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = tstamp - event->total_time_enabled;
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
                if (sub->state >= PERF_EVENT_STATE_INACTIVE)
                        sub->tstamp_enabled = tstamp - sub->total_time_enabled;
        }
}

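/*
 * Illustrative sketch, not part of the original file: why tstamp_enabled
 * is set to (now - total_time_enabled) above. update_event_times() later
 * computes
 *
 *	total_time_enabled = run_end - tstamp_enabled;
 *
 * so if an event had accumulated, say, 5ms of enabled time before being
 * disabled and is re-enabled at ctx->time = 100ms, setting
 * tstamp_enabled = 100ms - 5ms = 95ms makes the previously accumulated
 * 5ms carry over seamlessly once the context clock advances again.
 */
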
/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;

        if (WARN_ON_ONCE(!ctx->is_active))
                return -EINVAL;

        raw_spin_lock(&ctx->lock);
        update_context_time(ctx);

        if (event->state >= PERF_EVENT_STATE_INACTIVE)
                goto unlock;
        __perf_event_mark_enabled(event, ctx);

        if (!event_filter_match(event))
                goto unlock;

        /*
         * If the event is in a group and isn't the group leader,
         * then don't put it on unless the group is on.
         */
        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
                goto unlock;

        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
        }

        if (err) {
                /*
                 * If this event can't go on and it's part of a
                 * group, then the whole group has to come off.
                 */
                if (leader != event)
                        group_sched_out(leader, cpuctx, ctx);
                if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_EVENT_STATE_ERROR;
                }
        }

unlock:
        raw_spin_unlock(&ctx->lock);

        return 0;
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Enable the event on the cpu that it's on
                 */
                cpu_function_call(event->cpu, __perf_event_enable, event);
                return;
        }

        raw_spin_lock_irq(&ctx->lock);
        if (event->state >= PERF_EVENT_STATE_INACTIVE)
                goto out;

        /*
         * If the event is in error state, clear that first.
         * That way, if we see the event in error state below, we
         * know that it has gone back into error state, as distinct
         * from the task having been scheduled away before the
         * cross-call arrived.
         */
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;

retry:
        if (!ctx->is_active) {
                __perf_event_mark_enabled(event, ctx);
                goto out;
        }

        raw_spin_unlock_irq(&ctx->lock);

        if (!task_function_call(task, __perf_event_enable, event))
                return;

        raw_spin_lock_irq(&ctx->lock);

        /*
         * If the context is active and the event is still off,
         * we need to retry the cross-call.
         */
        if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
                /*
                 * task could have been flipped by a concurrent
                 * perf_event_context_sched_out()
                 */
                task = ctx->task;
                goto retry;
        }

out:
        raw_spin_unlock_irq(&ctx->lock);
}

static int perf_event_refresh(struct perf_event *event, int refresh)
{
        /*
         * not supported on inherited events
         */
        if (event->attr.inherit || !is_sampling_event(event))
                return -EINVAL;

        atomic_add(refresh, &event->event_limit);
        perf_event_enable(event);

        return 0;
}

static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
{
        struct perf_event *event;

        raw_spin_lock(&ctx->lock);
        perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
        update_context_time(ctx);

        if (!ctx->nr_active)
                goto out;

        if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }

        if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }
out:
        perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
                         struct perf_event_context *ctx2)
{
        return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
                && ctx1->parent_gen == ctx2->parent_gen
                && !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
                                   struct perf_event *next_event)
{
        u64 value;

        if (!event->attr.inherit_stat)
                return;

        /*
         * Update the event value; we cannot use perf_event_read()
         * because we're in the middle of a context switch and have IRQs
         * disabled, which upsets smp_call_function_single(). However,
         * we know the event must be on the current CPU, therefore we
         * don't need to use it.
         */
        switch (event->state) {
        case PERF_EVENT_STATE_ACTIVE:
                event->pmu->read(event);
                /* fall-through */

        case PERF_EVENT_STATE_INACTIVE:
                update_event_times(event);
                break;

        default:
                break;
        }

        /*
         * In order to keep per-task stats reliable we need to flip the event
         * values when we flip the contexts.
         */
        value = local64_read(&next_event->count);
        value = local64_xchg(&event->count, value);
        local64_set(&next_event->count, value);

        swap(event->total_time_enabled, next_event->total_time_enabled);
        swap(event->total_time_running, next_event->total_time_running);

        /*
         * Since we swizzled the values, update the user visible data too.
         */
        perf_event_update_userpage(event);
        perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
        list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
                                 struct perf_event_context *next_ctx)
{
        struct perf_event *event, *next_event;

        if (!ctx->nr_stat)
                return;

        update_context_time(ctx);

        event = list_first_entry(&ctx->event_list,
                                 struct perf_event, event_entry);

        next_event = list_first_entry(&next_ctx->event_list,
                                      struct perf_event, event_entry);

        while (&event->event_entry != &ctx->event_list &&
               &next_event->event_entry != &next_ctx->event_list) {

                __perf_event_sync_stat(event, next_event);

                event = list_next_entry(event, event_entry);
                next_event = list_next_entry(next_event, event_entry);
        }
}

Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001407static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1408 struct task_struct *next)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001409{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001410 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001411 struct perf_event_context *next_ctx;
1412 struct perf_event_context *parent;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001413 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001414 int do_switch = 1;
1415
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001416 if (likely(!ctx))
1417 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001418
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001419 cpuctx = __get_cpu_context(ctx);
1420 if (!cpuctx->task_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001421 return;
1422
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001423 rcu_read_lock();
1424 parent = rcu_dereference(ctx->parent_ctx);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001425 next_ctx = next->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001426 if (parent && next_ctx &&
1427 rcu_dereference(next_ctx->parent_ctx) == parent) {
1428 /*
1429 * Looks like the two contexts are clones, so we might be
1430 * able to optimize the context switch. We lock both
1431 * contexts and check that they are clones under the
1432 * lock (including re-checking that neither has been
1433 * uncloned in the meantime). It doesn't matter which
1434 * order we take the locks because no other cpu could
1435 * be trying to lock both of these tasks.
1436 */
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001437 raw_spin_lock(&ctx->lock);
1438 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001439 if (context_equiv(ctx, next_ctx)) {
1440 /*
1441 * XXX do we need a memory barrier of sorts
1442 * w.r.t. the rcu_dereference() of perf_event_ctxp?
1443 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001444 task->perf_event_ctxp[ctxn] = next_ctx;
1445 next->perf_event_ctxp[ctxn] = ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001446 ctx->task = next;
1447 next_ctx->task = task;
1448 do_switch = 0;
1449
1450 perf_event_sync_stat(ctx, next_ctx);
1451 }
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001452 raw_spin_unlock(&next_ctx->lock);
1453 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001454 }
1455 rcu_read_unlock();
1456
1457 if (do_switch) {
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001458 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001459 cpuctx->task_ctx = NULL;
1460 }
1461}
1462
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001463#define for_each_task_context_nr(ctxn) \
1464 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
1465
1466/*
1467 * Called from the scheduler to remove the events of the current task,
1468 * with interrupts disabled.
1469 *
1470 * We stop each event and update the event value in event->count.
1471 *
1472 * This does not protect us against NMI, but disable()
1473 * sets the disabled bit in the control field of the event _before_
1474 * accessing the event control register. If an NMI hits, it will
1475 * not restart the event.
1476 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02001477void __perf_event_task_sched_out(struct task_struct *task,
1478 struct task_struct *next)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001479{
1480 int ctxn;
1481
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001482 for_each_task_context_nr(ctxn)
1483 perf_event_context_sched_out(task, ctxn, next);
1484}
1485
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001486static void task_ctx_sched_out(struct perf_event_context *ctx,
1487 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001488{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001489 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001490
1491 if (!cpuctx->task_ctx)
1492 return;
1493
1494 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1495 return;
1496
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001497 ctx_sched_out(ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001498 cpuctx->task_ctx = NULL;
1499}
1500
1501/*
1502 * Called with IRQs disabled
1503 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001504static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1505 enum event_type_t event_type)
1506{
1507 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001508}
1509
1510static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001511ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001512 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001513{
1514 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001515
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001516 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1517 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001518 continue;
Stephane Eranian5632ab12011-01-03 18:20:01 +02001519 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001520 continue;
1521
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001522 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01001523 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001524
1525 /*
1526 * If this pinned group hasn't been scheduled,
1527 * put it in error state.
1528 */
1529 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1530 update_group_times(event);
1531 event->state = PERF_EVENT_STATE_ERROR;
1532 }
1533 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001534}
1535
1536static void
1537ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001538 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001539{
1540 struct perf_event *event;
1541 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001542
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001543 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1544 /* Ignore events in OFF or ERROR state */
1545 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001546 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001547 /*
1548 * Listen to the 'cpu' scheduling filter constraint
1549 * of events:
1550 */
Stephane Eranian5632ab12011-01-03 18:20:01 +02001551 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001552 continue;
1553
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001554 if (group_can_go_on(event, cpuctx, can_add_hw)) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001555 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001556 can_add_hw = 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001557 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001558 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001559}
1560
1561static void
1562ctx_sched_in(struct perf_event_context *ctx,
1563 struct perf_cpu_context *cpuctx,
1564 enum event_type_t event_type)
1565{
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001566 raw_spin_lock(&ctx->lock);
1567 ctx->is_active = 1;
1568 if (likely(!ctx->nr_events))
1569 goto out;
1570
1571 ctx->timestamp = perf_clock();
1572
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001573 /*
1574 * First go through the list and put on any pinned groups
1575 * in order to give them the best chance of going on.
1576 */
1577 if (event_type & EVENT_PINNED)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001578 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001579
1580 /* Then walk through the lower prio flexible groups */
1581 if (event_type & EVENT_FLEXIBLE)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001582 ctx_flexible_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001583
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001584out:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001585 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001586}
1587
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001588static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1589 enum event_type_t event_type)
1590{
1591 struct perf_event_context *ctx = &cpuctx->ctx;
1592
1593 ctx_sched_in(ctx, cpuctx, event_type);
1594}
1595
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001596static void task_ctx_sched_in(struct perf_event_context *ctx,
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001597 enum event_type_t event_type)
1598{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001599 struct perf_cpu_context *cpuctx;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001600
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001601 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001602 if (cpuctx->task_ctx == ctx)
1603 return;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001604
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001605 ctx_sched_in(ctx, cpuctx, event_type);
1606 cpuctx->task_ctx = ctx;
1607}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001608
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001609static void perf_event_context_sched_in(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001610{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001611 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001612
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001613 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001614 if (cpuctx->task_ctx == ctx)
1615 return;
1616
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001617 perf_pmu_disable(ctx->pmu);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001618 /*
1619 * We want to keep the following priority order:
1620 * cpu pinned (these don't need to move), task pinned,
1621 * cpu flexible, task flexible.
1622 */
1623 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1624
1625 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1626 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1627 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1628
1629 cpuctx->task_ctx = ctx;
eranian@google.com9b33fa62010-03-10 22:26:05 -08001630
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001631 /*
1632 * Since these rotations are per-cpu, we need to ensure the
1633 * cpu-context we got scheduled on is actually rotating.
1634 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001635 perf_pmu_rotate_start(ctx->pmu);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001636 perf_pmu_enable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001637}
1638
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001639/*
1640 * Called from the scheduler to add the events of the current task
1641 * with interrupts disabled.
1642 *
1643 * We restore the event value and then enable it.
1644 *
1645 * This does not protect us against NMI, but enable()
1646 * sets the enabled bit in the control field of the event _before_
1647 * accessing the event control register. If an NMI hits, it will
1648 * keep the event running.
1649 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02001650void __perf_event_task_sched_in(struct task_struct *task)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001651{
1652 struct perf_event_context *ctx;
1653 int ctxn;
1654
1655 for_each_task_context_nr(ctxn) {
1656 ctx = task->perf_event_ctxp[ctxn];
1657 if (likely(!ctx))
1658 continue;
1659
1660 perf_event_context_sched_in(ctx);
1661 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001662}
1663
1664#define MAX_INTERRUPTS (~0ULL)
1665
1666static void perf_log_throttle(struct perf_event *event, int enable);
1667
Peter Zijlstraabd50712010-01-26 18:50:16 +01001668static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1669{
1670 u64 frequency = event->attr.sample_freq;
1671 u64 sec = NSEC_PER_SEC;
1672 u64 divisor, dividend;
1673
1674 int count_fls, nsec_fls, frequency_fls, sec_fls;
1675
1676 count_fls = fls64(count);
1677 nsec_fls = fls64(nsec);
1678 frequency_fls = fls64(frequency);
1679 sec_fls = 30;
1680
1681 /*
1682 * We got @count in @nsec, with a target of sample_freq Hz;
1683 * the target period becomes:
1684 *
1685 * @count * 10^9
1686 * period = -------------------
1687 * @nsec * sample_freq
1688 *
1689 */
1690
1691 /*
1692 * Reduce accuracy by one bit such that @a and @b converge
1693 * to a similar magnitude.
1694 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001695#define REDUCE_FLS(a, b) \
Peter Zijlstraabd50712010-01-26 18:50:16 +01001696do { \
1697 if (a##_fls > b##_fls) { \
1698 a >>= 1; \
1699 a##_fls--; \
1700 } else { \
1701 b >>= 1; \
1702 b##_fls--; \
1703 } \
1704} while (0)
1705
1706 /*
1707 * Reduce accuracy until either term fits in a u64, then proceed with
1708 * the other, so that finally we can do a u64/u64 division.
1709 */
1710 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1711 REDUCE_FLS(nsec, frequency);
1712 REDUCE_FLS(sec, count);
1713 }
1714
1715 if (count_fls + sec_fls > 64) {
1716 divisor = nsec * frequency;
1717
1718 while (count_fls + sec_fls > 64) {
1719 REDUCE_FLS(count, sec);
1720 divisor >>= 1;
1721 }
1722
1723 dividend = count * sec;
1724 } else {
1725 dividend = count * sec;
1726
1727 while (nsec_fls + frequency_fls > 64) {
1728 REDUCE_FLS(nsec, frequency);
1729 dividend >>= 1;
1730 }
1731
1732 divisor = nsec * frequency;
1733 }
1734
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02001735 if (!divisor)
1736 return dividend;
1737
Peter Zijlstraabd50712010-01-26 18:50:16 +01001738 return div64_u64(dividend, divisor);
1739}
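/*
 * Illustrative sketch, not kernel code: the REDUCE_FLS() loop above is an
 * overflow-avoidance trick; the value it approximates is the plain 128-bit
 * quotient below. A user-space model, assuming a compiler that provides
 * unsigned __int128 (model_* names are made up for the example):
 */
#include <stdint.h>

#define MODEL_NSEC_PER_SEC 1000000000ULL

static uint64_t model_calculate_period(uint64_t count, uint64_t nsec,
                                       uint64_t sample_freq)
{
        /* period = (@count * 10^9) / (@nsec * sample_freq) */
        unsigned __int128 dividend = (unsigned __int128)count * MODEL_NSEC_PER_SEC;
        unsigned __int128 divisor = (unsigned __int128)nsec * sample_freq;

        if (!divisor)                   /* mirrors the !divisor case above */
                return (uint64_t)dividend;      /* truncates in this model */

        return (uint64_t)(dividend / divisor);
}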
1740
1741static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001742{
1743 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02001744 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001745 s64 delta;
1746
Peter Zijlstraabd50712010-01-26 18:50:16 +01001747 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001748
1749 delta = (s64)(period - hwc->sample_period);
1750 delta = (delta + 7) / 8; /* low pass filter */
1751
1752 sample_period = hwc->sample_period + delta;
1753
1754 if (!sample_period)
1755 sample_period = 1;
1756
1757 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001758
Peter Zijlstrae7850592010-05-21 14:43:08 +02001759 if (local64_read(&hwc->period_left) > 8*sample_period) {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001760 event->pmu->stop(event, PERF_EF_UPDATE);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001761 local64_set(&hwc->period_left, 0);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001762 event->pmu->start(event, PERF_EF_RELOAD);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001763 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001764}
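/*
 * Illustrative sketch, not kernel code: the (delta + 7) / 8 step above is a
 * low-pass filter; each invocation moves sample_period one eighth of the way
 * toward the newly computed period, so the period converges over a few ticks
 * instead of jumping on every noisy sample:
 */
static long long model_filter_step(long long sample_period, long long target)
{
        return sample_period + (target - sample_period + 7) / 8;
}

/* e.g. starting at 1000 with target 2000: 1125, 1235, 1331, ... -> 2000 */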
1765
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001766static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001767{
1768 struct perf_event *event;
1769 struct hw_perf_event *hwc;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001770 u64 interrupts, now;
1771 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001772
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001773 raw_spin_lock(&ctx->lock);
Paul Mackerras03541f82009-10-14 16:58:03 +11001774 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001775 if (event->state != PERF_EVENT_STATE_ACTIVE)
1776 continue;
1777
Stephane Eranian5632ab12011-01-03 18:20:01 +02001778 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01001779 continue;
1780
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001781 hwc = &event->hw;
1782
1783 interrupts = hwc->interrupts;
1784 hwc->interrupts = 0;
1785
1786 /*
1787 * unthrottle events on the tick
1788 */
1789 if (interrupts == MAX_INTERRUPTS) {
1790 perf_log_throttle(event, 1);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001791 event->pmu->start(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001792 }
1793
1794 if (!event->attr.freq || !event->attr.sample_freq)
1795 continue;
1796
Peter Zijlstraabd50712010-01-26 18:50:16 +01001797 event->pmu->read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001798 now = local64_read(&event->count);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001799 delta = now - hwc->freq_count_stamp;
1800 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001801
Peter Zijlstraabd50712010-01-26 18:50:16 +01001802 if (delta > 0)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001803 perf_adjust_period(event, period, delta);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001804 }
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001805 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001806}
1807
1808/*
1809 * Round-robin a context's events:
1810 */
1811static void rotate_ctx(struct perf_event_context *ctx)
1812{
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001813 raw_spin_lock(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001814
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01001815 /*
1816 * Rotate the first entry of the non-pinned groups to the end of the
1817 * list. Rotation might be disabled by the inheritance code.
1818 */
1819 if (!ctx->rotate_disable)
1820 list_rotate_left(&ctx->flexible_groups);
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001821
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001822 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001823}
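/*
 * Illustrative sketch, not kernel code: list_rotate_left() moves the head of
 * the flexible-group list to the tail, so a group that missed out on PMU
 * space this tick gets first pick on a later tick. The same round-robin
 * effect on a plain array:
 */
static void model_rotate_left(int *groups, int n)
{
        int first = groups[0];
        int i;

        for (i = 0; i < n - 1; i++)
                groups[i] = groups[i + 1];
        groups[n - 1] = first;
}

/* {A, B, C} -> {B, C, A} -> {C, A, B} -> {A, B, C} -> ... */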
1824
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001825/*
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001826 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
1827 * because they're strictly cpu affine and rotate_start is called with IRQs
1828 * disabled, while rotate_context is called from IRQ context.
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001829 */
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001830static void perf_rotate_context(struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001831{
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001832 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001833 struct perf_event_context *ctx = NULL;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001834 int rotate = 0, remove = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001835
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001836 if (cpuctx->ctx.nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001837 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001838 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1839 rotate = 1;
1840 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001841
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001842 ctx = cpuctx->task_ctx;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001843 if (ctx && ctx->nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001844 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001845 if (ctx->nr_events != ctx->nr_active)
1846 rotate = 1;
1847 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001848
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001849 perf_pmu_disable(cpuctx->ctx.pmu);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001850 perf_ctx_adjust_freq(&cpuctx->ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001851 if (ctx)
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001852 perf_ctx_adjust_freq(ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001853
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001854 if (!rotate)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001855 goto done;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001856
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001857 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001858 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001859 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001860
1861 rotate_ctx(&cpuctx->ctx);
1862 if (ctx)
1863 rotate_ctx(ctx);
1864
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001865 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001866 if (ctx)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001867 task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001868
1869done:
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001870 if (remove)
1871 list_del_init(&cpuctx->rotation_list);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001872
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001873 perf_pmu_enable(cpuctx->ctx.pmu);
1874}
1875
1876void perf_event_task_tick(void)
1877{
1878 struct list_head *head = &__get_cpu_var(rotation_list);
1879 struct perf_cpu_context *cpuctx, *tmp;
1880
1881 WARN_ON(!irqs_disabled());
1882
1883 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
1884 if (cpuctx->jiffies_interval == 1 ||
1885 !(jiffies % cpuctx->jiffies_interval))
1886 perf_rotate_context(cpuctx);
1887 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001888}
1889
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001890static int event_enable_on_exec(struct perf_event *event,
1891 struct perf_event_context *ctx)
1892{
1893 if (!event->attr.enable_on_exec)
1894 return 0;
1895
1896 event->attr.enable_on_exec = 0;
1897 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1898 return 0;
1899
1900 __perf_event_mark_enabled(event, ctx);
1901
1902 return 1;
1903}
1904
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001905/*
1906 * Enable all of a task's events that have been marked enable-on-exec.
1907 * This expects task == current.
1908 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001909static void perf_event_enable_on_exec(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001911 struct perf_event *event;
1912 unsigned long flags;
1913 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001914 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001915
1916 local_irq_save(flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001917 if (!ctx || !ctx->nr_events)
1918 goto out;
1919
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001920 task_ctx_sched_out(ctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001921
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001922 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001923
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001924 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1925 ret = event_enable_on_exec(event, ctx);
1926 if (ret)
1927 enabled = 1;
1928 }
1929
1930 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1931 ret = event_enable_on_exec(event, ctx);
1932 if (ret)
1933 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001934 }
1935
1936 /*
1937 * Unclone this context if we enabled any event.
1938 */
1939 if (enabled)
1940 unclone_ctx(ctx);
1941
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001942 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001943
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001944 perf_event_context_sched_in(ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001945out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001946 local_irq_restore(flags);
1947}
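/*
 * Illustrative user-space sketch, not kernel code: enable_on_exec lets a
 * tool attach a disabled counter to a forked child and have the kernel arm
 * it exactly at exec(), so none of the setup is measured. Assumes only the
 * documented perf_event_open() syscall:
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int open_counter_armed_at_exec(pid_t child)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;              /* stays off through fork()... */
        attr.enable_on_exec = 1;        /* ...armed when the child exec()s */

        return syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
}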
1948
1949/*
1950 * Cross CPU call to read the hardware event
1951 */
1952static void __perf_event_read(void *info)
1953{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001954 struct perf_event *event = info;
1955 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001956 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001957
1958 /*
1959 * If this is a task context, we need to check whether it is
1960 * the current task context of this CPU. If not, it has been
1961 * scheduled out before the smp call arrived. In that case
1962 * event->count would have been updated to a recent sample
1963 * when the event was scheduled out.
1964 */
1965 if (ctx->task && cpuctx->task_ctx != ctx)
1966 return;
1967
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001968 raw_spin_lock(&ctx->lock);
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001969 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001970 update_event_times(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001971 raw_spin_unlock(&ctx->lock);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001972
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001973 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001974}
1975
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001976static inline u64 perf_event_count(struct perf_event *event)
1977{
Peter Zijlstrae7850592010-05-21 14:43:08 +02001978 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001979}
1980
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001981static u64 perf_event_read(struct perf_event *event)
1982{
1983 /*
1984 * If event is enabled and currently active on a CPU, update the
1985 * value in the event structure:
1986 */
1987 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1988 smp_call_function_single(event->oncpu,
1989 __perf_event_read, event, 1);
1990 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001991 struct perf_event_context *ctx = event->ctx;
1992 unsigned long flags;
1993
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001994 raw_spin_lock_irqsave(&ctx->lock, flags);
Stephane Eranianc530ccd2010-10-15 15:26:01 +02001995 /*
1996 * We may read while the context is not active
1997 * (e.g., the thread is blocked); in that case
1998 * we cannot update the context time.
1999 */
2000 if (ctx->is_active)
2001 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002002 update_event_times(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002003 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002004 }
2005
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002006 return perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002007}
2008
2009/*
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002010 * Callchain support
2011 */
2012
2013struct callchain_cpus_entries {
2014 struct rcu_head rcu_head;
2015 struct perf_callchain_entry *cpu_entries[0];
2016};
2017
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02002018static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002019static atomic_t nr_callchain_events;
2020static DEFINE_MUTEX(callchain_mutex);
2021struct callchain_cpus_entries *callchain_cpus_entries;
2022
2023
2024__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
2025 struct pt_regs *regs)
2026{
2027}
2028
2029__weak void perf_callchain_user(struct perf_callchain_entry *entry,
2030 struct pt_regs *regs)
2031{
2032}
2033
2034static void release_callchain_buffers_rcu(struct rcu_head *head)
2035{
2036 struct callchain_cpus_entries *entries;
2037 int cpu;
2038
2039 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
2040
2041 for_each_possible_cpu(cpu)
2042 kfree(entries->cpu_entries[cpu]);
2043
2044 kfree(entries);
2045}
2046
2047static void release_callchain_buffers(void)
2048{
2049 struct callchain_cpus_entries *entries;
2050
2051 entries = callchain_cpus_entries;
2052 rcu_assign_pointer(callchain_cpus_entries, NULL);
2053 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
2054}
2055
2056static int alloc_callchain_buffers(void)
2057{
2058 int cpu;
2059 int size;
2060 struct callchain_cpus_entries *entries;
2061
2062 /*
2063 * We can't use the percpu allocation API for data that can be
2064 * accessed from NMI. Use a temporary manual per-CPU allocation
2065 * until that gets sorted out.
2066 */
2067 size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
2068 num_possible_cpus();
2069
2070 entries = kzalloc(size, GFP_KERNEL);
2071 if (!entries)
2072 return -ENOMEM;
2073
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02002074 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002075
2076 for_each_possible_cpu(cpu) {
2077 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
2078 cpu_to_node(cpu));
2079 if (!entries->cpu_entries[cpu])
2080 goto fail;
2081 }
2082
2083 rcu_assign_pointer(callchain_cpus_entries, entries);
2084
2085 return 0;
2086
2087fail:
2088 for_each_possible_cpu(cpu)
2089 kfree(entries->cpu_entries[cpu]);
2090 kfree(entries);
2091
2092 return -ENOMEM;
2093}
2094
2095static int get_callchain_buffers(void)
2096{
2097 int err = 0;
2098 int count;
2099
2100 mutex_lock(&callchain_mutex);
2101
2102 count = atomic_inc_return(&nr_callchain_events);
2103 if (WARN_ON_ONCE(count < 1)) {
2104 err = -EINVAL;
2105 goto exit;
2106 }
2107
2108 if (count > 1) {
2109 /* If the allocation failed, give up */
2110 if (!callchain_cpus_entries)
2111 err = -ENOMEM;
2112 goto exit;
2113 }
2114
2115 err = alloc_callchain_buffers();
2116 if (err)
2117 release_callchain_buffers();
2118exit:
2119 mutex_unlock(&callchain_mutex);
2120
2121 return err;
2122}
2123
2124static void put_callchain_buffers(void)
2125{
2126 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
2127 release_callchain_buffers();
2128 mutex_unlock(&callchain_mutex);
2129 }
2130}
2131
2132static int get_recursion_context(int *recursion)
2133{
2134 int rctx;
2135
2136 if (in_nmi())
2137 rctx = 3;
2138 else if (in_irq())
2139 rctx = 2;
2140 else if (in_softirq())
2141 rctx = 1;
2142 else
2143 rctx = 0;
2144
2145 if (recursion[rctx])
2146 return -1;
2147
2148 recursion[rctx]++;
2149 barrier();
2150
2151 return rctx;
2152}
2153
2154static inline void put_recursion_context(int *recursion, int rctx)
2155{
2156 barrier();
2157 recursion[rctx]--;
2158}
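/*
 * Illustrative sketch, not kernel code: the two helpers above form a
 * per-context reentrancy guard. Each execution context (task = 0,
 * softirq = 1, hardirq = 2, NMI = 3) owns one slot, so an event firing
 * from an interrupt may nest over one in task context, but a second hit
 * in the *same* context is refused (-1). The bare pattern:
 */
static int model_recursion[4];

static int model_get_context(int rctx)          /* rctx in [0, 3] */
{
        if (model_recursion[rctx])
                return -1;                      /* already in here, refuse */
        model_recursion[rctx]++;
        return rctx;
}

static void model_put_context(int rctx)
{
        model_recursion[rctx]--;
}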
2159
2160static struct perf_callchain_entry *get_callchain_entry(int *rctx)
2161{
2162 int cpu;
2163 struct callchain_cpus_entries *entries;
2164
2165 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
2166 if (*rctx == -1)
2167 return NULL;
2168
2169 entries = rcu_dereference(callchain_cpus_entries);
2170 if (!entries)
2171 return NULL;
2172
2173 cpu = smp_processor_id();
2174
2175 return &entries->cpu_entries[cpu][*rctx];
2176}
2177
2178static void
2179put_callchain_entry(int rctx)
2180{
2181 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
2182}
2183
2184static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2185{
2186 int rctx;
2187 struct perf_callchain_entry *entry;
2188
2189
2190 entry = get_callchain_entry(&rctx);
2191 if (rctx == -1)
2192 return NULL;
2193
2194 if (!entry)
2195 goto exit_put;
2196
2197 entry->nr = 0;
2198
2199 if (!user_mode(regs)) {
2200 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
2201 perf_callchain_kernel(entry, regs);
2202 if (current->mm)
2203 regs = task_pt_regs(current);
2204 else
2205 regs = NULL;
2206 }
2207
2208 if (regs) {
2209 perf_callchain_store(entry, PERF_CONTEXT_USER);
2210 perf_callchain_user(entry, regs);
2211 }
2212
2213exit_put:
2214 put_callchain_entry(rctx);
2215
2216 return entry;
2217}
2218
2219/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002220 * Initialize the perf_event context in a task_struct:
2221 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02002222static void __perf_event_init_context(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002223{
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002224 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002225 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002226 INIT_LIST_HEAD(&ctx->pinned_groups);
2227 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002228 INIT_LIST_HEAD(&ctx->event_list);
2229 atomic_set(&ctx->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002230}
2231
Peter Zijlstraeb184472010-09-07 15:55:13 +02002232static struct perf_event_context *
2233alloc_perf_context(struct pmu *pmu, struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002234{
2235 struct perf_event_context *ctx;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002236
2237 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2238 if (!ctx)
2239 return NULL;
2240
2241 __perf_event_init_context(ctx);
2242 if (task) {
2243 ctx->task = task;
2244 get_task_struct(task);
2245 }
2246 ctx->pmu = pmu;
2247
2248 return ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002249}
2250
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002251static struct task_struct *
2252find_lively_task_by_vpid(pid_t vpid)
2253{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002254 struct task_struct *task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002255 int err;
2256
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002257 rcu_read_lock();
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002258 if (!vpid)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002259 task = current;
2260 else
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002261 task = find_task_by_vpid(vpid);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002262 if (task)
2263 get_task_struct(task);
2264 rcu_read_unlock();
2265
2266 if (!task)
2267 return ERR_PTR(-ESRCH);
2268
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002269 /* Reuse ptrace permission checks for now. */
2270 err = -EACCES;
2271 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2272 goto errout;
2273
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002274 return task;
2275errout:
2276 put_task_struct(task);
2277 return ERR_PTR(err);
2278
2279}
2280
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002281/*
2282 * Returns a matching context with refcount and pincount.
2283 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002284static struct perf_event_context *
Matt Helsley38a81da2010-09-13 13:01:20 -07002285find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002286{
2287 struct perf_event_context *ctx;
2288 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002289 unsigned long flags;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002290 int ctxn, err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002291
Oleg Nesterov22a4ec72011-01-18 17:10:08 +01002292 if (!task) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002293 /* Must be root to operate on a CPU event: */
2294 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2295 return ERR_PTR(-EACCES);
2296
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002297 /*
2298 * We could be clever and allow attaching an event to an
2299 * offline CPU and activate it when the CPU comes up, but
2300 * that's for later.
2301 */
2302 if (!cpu_online(cpu))
2303 return ERR_PTR(-ENODEV);
2304
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002305 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002306 ctx = &cpuctx->ctx;
2307 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002308 ++ctx->pin_count;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002309
2310 return ctx;
2311 }
2312
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002313 err = -EINVAL;
2314 ctxn = pmu->task_ctx_nr;
2315 if (ctxn < 0)
2316 goto errout;
2317
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002318retry:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002319 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002320 if (ctx) {
2321 unclone_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002322 ++ctx->pin_count;
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002323 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002324 }
2325
2326 if (!ctx) {
Peter Zijlstraeb184472010-09-07 15:55:13 +02002327 ctx = alloc_perf_context(pmu, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002328 err = -ENOMEM;
2329 if (!ctx)
2330 goto errout;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002331
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002332 get_ctx(ctx);
Peter Zijlstraeb184472010-09-07 15:55:13 +02002333
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002334 err = 0;
2335 mutex_lock(&task->perf_event_mutex);
2336 /*
2337 * If the task has already passed perf_event_exit_task(),
2338 * we must see PF_EXITING; it takes this mutex too.
2339 */
2340 if (task->flags & PF_EXITING)
2341 err = -ESRCH;
2342 else if (task->perf_event_ctxp[ctxn])
2343 err = -EAGAIN;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002344 else {
2345 ++ctx->pin_count;
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002346 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002347 }
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002348 mutex_unlock(&task->perf_event_mutex);
2349
2350 if (unlikely(err)) {
Peter Zijlstraeb184472010-09-07 15:55:13 +02002351 put_task_struct(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002352 kfree(ctx);
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002353
2354 if (err == -EAGAIN)
2355 goto retry;
2356 goto errout;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002357 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002358 }
2359
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002360 return ctx;
2361
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002362errout:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002363 return ERR_PTR(err);
2364}
2365
Li Zefan6fb29152009-10-15 11:21:42 +08002366static void perf_event_free_filter(struct perf_event *event);
2367
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002368static void free_event_rcu(struct rcu_head *head)
2369{
2370 struct perf_event *event;
2371
2372 event = container_of(head, struct perf_event, rcu_head);
2373 if (event->ns)
2374 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08002375 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002376 kfree(event);
2377}
2378
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002379static void perf_buffer_put(struct perf_buffer *buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002380
2381static void free_event(struct perf_event *event)
2382{
Peter Zijlstrae360adb2010-10-14 14:01:34 +08002383 irq_work_sync(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002384
2385 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02002386 if (event->attach_state & PERF_ATTACH_TASK)
2387 jump_label_dec(&perf_task_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01002388 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002389 atomic_dec(&nr_mmap_events);
2390 if (event->attr.comm)
2391 atomic_dec(&nr_comm_events);
2392 if (event->attr.task)
2393 atomic_dec(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002394 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2395 put_callchain_buffers();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002396 }
2397
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002398 if (event->buffer) {
2399 perf_buffer_put(event->buffer);
2400 event->buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002401 }
2402
2403 if (event->destroy)
2404 event->destroy(event);
2405
Peter Zijlstra0c67b402010-09-13 11:15:58 +02002406 if (event->ctx)
2407 put_ctx(event->ctx);
2408
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002409 call_rcu(&event->rcu_head, free_event_rcu);
2410}
2411
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002412int perf_event_release_kernel(struct perf_event *event)
2413{
2414 struct perf_event_context *ctx = event->ctx;
2415
Peter Zijlstra050735b2010-05-11 11:51:53 +02002416 /*
2417 * Remove the event from the PMU; it can't get re-enabled since we
2418 * got here because the last reference went away.
2419 */
2420 perf_event_disable(event);
2421
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002422 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa0507c82010-05-06 15:42:53 +02002423 /*
2424 * There are two ways this annotation is useful:
2425 *
2426 * 1) there is a lock recursion from perf_event_exit_task
2427 * see the comment there.
2428 *
2429 * 2) there is a lock-inversion with mmap_sem through
2430 * perf_event_read_group(), which takes faults while
2431 * holding ctx->mutex, however this is called after
2432 * the last filedesc died, so there is no possibility
2433 * to trigger the AB-BA case.
2434 */
2435 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002436 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002437 perf_group_detach(event);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002438 list_del_event(event, ctx);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002439 raw_spin_unlock_irq(&ctx->lock);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002440 mutex_unlock(&ctx->mutex);
2441
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002442 free_event(event);
2443
2444 return 0;
2445}
2446EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2447
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002448/*
2449 * Called when the last reference to the file is gone.
2450 */
2451static int perf_release(struct inode *inode, struct file *file)
2452{
2453 struct perf_event *event = file->private_data;
Peter Zijlstra88821352010-11-09 19:01:43 +01002454 struct task_struct *owner;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002455
2456 file->private_data = NULL;
2457
Peter Zijlstra88821352010-11-09 19:01:43 +01002458 rcu_read_lock();
2459 owner = ACCESS_ONCE(event->owner);
2460 /*
2461 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2462 * !owner, it means the list deletion is complete and we can indeed
2463 * free this event; otherwise we need to serialize on
2464 * owner->perf_event_mutex.
2465 */
2466 smp_read_barrier_depends();
2467 if (owner) {
2468 /*
2469 * Since delayed_put_task_struct() also drops the last
2470 * task reference we can safely take a new reference
2471 * while holding the rcu_read_lock().
2472 */
2473 get_task_struct(owner);
2474 }
2475 rcu_read_unlock();
2476
2477 if (owner) {
2478 mutex_lock(&owner->perf_event_mutex);
2479 /*
2480 * We have to re-check the event->owner field: if it is cleared
2481 * we raced with perf_event_exit_task(); acquiring the mutex
2482 * ensures they're done, and we can proceed with freeing the
2483 * event.
2484 */
2485 if (event->owner)
2486 list_del_init(&event->owner_entry);
2487 mutex_unlock(&owner->perf_event_mutex);
2488 put_task_struct(owner);
2489 }
2490
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002491 return perf_event_release_kernel(event);
2492}
2493
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002494u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002495{
2496 struct perf_event *child;
2497 u64 total = 0;
2498
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002499 *enabled = 0;
2500 *running = 0;
2501
Peter Zijlstra6f105812009-11-20 22:19:56 +01002502 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002503 total += perf_event_read(event);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002504 *enabled += event->total_time_enabled +
2505 atomic64_read(&event->child_total_time_enabled);
2506 *running += event->total_time_running +
2507 atomic64_read(&event->child_total_time_running);
2508
2509 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002510 total += perf_event_read(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002511 *enabled += child->total_time_enabled;
2512 *running += child->total_time_running;
2513 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002514 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002515
2516 return total;
2517}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002518EXPORT_SYMBOL_GPL(perf_event_read_value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002519
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002520static int perf_event_read_group(struct perf_event *event,
2521 u64 read_format, char __user *buf)
2522{
2523 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01002524 int n = 0, size = 0, ret = -EFAULT;
2525 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002526 u64 values[5];
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002527 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002528
Peter Zijlstra6f105812009-11-20 22:19:56 +01002529 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002530 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002531
2532 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002533 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2534 values[n++] = enabled;
2535 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2536 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002537 values[n++] = count;
2538 if (read_format & PERF_FORMAT_ID)
2539 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002540
2541 size = n * sizeof(u64);
2542
2543 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01002544 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002545
Peter Zijlstra6f105812009-11-20 22:19:56 +01002546 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002547
2548 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01002549 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002550
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002551 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01002552 if (read_format & PERF_FORMAT_ID)
2553 values[n++] = primary_event_id(sub);
2554
2555 size = n * sizeof(u64);
2556
Stephane Eranian184d3da2009-11-23 21:40:49 -08002557 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01002558 ret = -EFAULT;
2559 goto unlock;
2560 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01002561
2562 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002563 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002564unlock:
2565 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002566
Peter Zijlstraabf48682009-11-20 22:19:49 +01002567 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002568}
2569
2570static int perf_event_read_one(struct perf_event *event,
2571 u64 read_format, char __user *buf)
2572{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002573 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002574 u64 values[4];
2575 int n = 0;
2576
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002577 values[n++] = perf_event_read_value(event, &enabled, &running);
2578 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2579 values[n++] = enabled;
2580 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2581 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002582 if (read_format & PERF_FORMAT_ID)
2583 values[n++] = primary_event_id(event);
2584
2585 if (copy_to_user(buf, values, n * sizeof(u64)))
2586 return -EFAULT;
2587
2588 return n * sizeof(u64);
2589}
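/*
 * Illustrative user-space sketch, not kernel code: the values[] layout
 * written above is exactly what read() on the event fd returns. Decoding
 * it for an event opened with PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID:
 */
#include <stdint.h>
#include <unistd.h>

struct model_read_one {                 /* field order matches values[] */
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
        uint64_t id;
};

static int model_read_counter(int fd, struct model_read_one *r)
{
        return read(fd, r, sizeof(*r)) == sizeof(*r) ? 0 : -1;
}

/*
 * If the event was multiplexed, value * time_enabled / time_running
 * estimates what the count would have been with the PMU all to itself.
 */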
2590
2591/*
2592 * Read the performance event - a simple non-blocking version for now
2593 */
2594static ssize_t
2595perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2596{
2597 u64 read_format = event->attr.read_format;
2598 int ret;
2599
2600 /*
2601 * Return end-of-file for a read on an event that is in an
2602 * error state (i.e., because it was pinned but it couldn't be
2603 * scheduled onto the CPU at some point).
2604 */
2605 if (event->state == PERF_EVENT_STATE_ERROR)
2606 return 0;
2607
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02002608 if (count < event->read_size)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002609 return -ENOSPC;
2610
2611 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002612 if (read_format & PERF_FORMAT_GROUP)
2613 ret = perf_event_read_group(event, read_format, buf);
2614 else
2615 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002616
2617 return ret;
2618}
2619
2620static ssize_t
2621perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2622{
2623 struct perf_event *event = file->private_data;
2624
2625 return perf_read_hw(event, buf, count);
2626}
2627
2628static unsigned int perf_poll(struct file *file, poll_table *wait)
2629{
2630 struct perf_event *event = file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002631 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002632 unsigned int events = POLL_HUP;
2633
2634 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002635 buffer = rcu_dereference(event->buffer);
2636 if (buffer)
2637 events = atomic_xchg(&buffer->poll, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002638 rcu_read_unlock();
2639
2640 poll_wait(file, &event->waitq, wait);
2641
2642 return events;
2643}
2644
2645static void perf_event_reset(struct perf_event *event)
2646{
2647 (void)perf_event_read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02002648 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002649 perf_event_update_userpage(event);
2650}
2651
2652/*
2653 * Holding the top-level event's child_mutex means that any
2654 * descendant process that has inherited this event will block
2655 * in sync_child_event if it goes to exit, thus satisfying the
2656 * task existence requirements of perf_event_enable/disable.
2657 */
2658static void perf_event_for_each_child(struct perf_event *event,
2659 void (*func)(struct perf_event *))
2660{
2661 struct perf_event *child;
2662
2663 WARN_ON_ONCE(event->ctx->parent_ctx);
2664 mutex_lock(&event->child_mutex);
2665 func(event);
2666 list_for_each_entry(child, &event->child_list, child_list)
2667 func(child);
2668 mutex_unlock(&event->child_mutex);
2669}
2670
2671static void perf_event_for_each(struct perf_event *event,
2672 void (*func)(struct perf_event *))
2673{
2674 struct perf_event_context *ctx = event->ctx;
2675 struct perf_event *sibling;
2676
2677 WARN_ON_ONCE(ctx->parent_ctx);
2678 mutex_lock(&ctx->mutex);
2679 event = event->group_leader;
2680
2681 perf_event_for_each_child(event, func);
2682 func(event);
2683 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2684 perf_event_for_each_child(event, func);
2685 mutex_unlock(&ctx->mutex);
2686}
2687
2688static int perf_event_period(struct perf_event *event, u64 __user *arg)
2689{
2690 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002691 int ret = 0;
2692 u64 value;
2693
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01002694 if (!is_sampling_event(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002695 return -EINVAL;
2696
John Blackwoodad0cf342010-09-28 18:03:11 -04002697 if (copy_from_user(&value, arg, sizeof(value)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002698 return -EFAULT;
2699
2700 if (!value)
2701 return -EINVAL;
2702
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002703 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002704 if (event->attr.freq) {
2705 if (value > sysctl_perf_event_sample_rate) {
2706 ret = -EINVAL;
2707 goto unlock;
2708 }
2709
2710 event->attr.sample_freq = value;
2711 } else {
2712 event->attr.sample_period = value;
2713 event->hw.sample_period = value;
2714 }
2715unlock:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002716 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002717
2718 return ret;
2719}
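/*
 * Illustrative user-space sketch, not kernel code: PERF_EVENT_IOC_PERIOD is
 * how a tool retunes a running event. Note the argument is passed by
 * pointer (u64 __user *), and that for a freq-based event the value is
 * checked against the sysctl limit as above:
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int model_set_sample_period(int fd, uint64_t period)
{
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}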
2720
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002721static const struct file_operations perf_fops;
2722
2723static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2724{
2725 struct file *file;
2726
2727 file = fget_light(fd, fput_needed);
2728 if (!file)
2729 return ERR_PTR(-EBADF);
2730
2731 if (file->f_op != &perf_fops) {
2732 fput_light(file, *fput_needed);
2733 *fput_needed = 0;
2734 return ERR_PTR(-EBADF);
2735 }
2736
2737 return file->private_data;
2738}
2739
2740static int perf_event_set_output(struct perf_event *event,
2741 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08002742static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002743
2744static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2745{
2746 struct perf_event *event = file->private_data;
2747 void (*func)(struct perf_event *);
2748 u32 flags = arg;
2749
2750 switch (cmd) {
2751 case PERF_EVENT_IOC_ENABLE:
2752 func = perf_event_enable;
2753 break;
2754 case PERF_EVENT_IOC_DISABLE:
2755 func = perf_event_disable;
2756 break;
2757 case PERF_EVENT_IOC_RESET:
2758 func = perf_event_reset;
2759 break;
2760
2761 case PERF_EVENT_IOC_REFRESH:
2762 return perf_event_refresh(event, arg);
2763
2764 case PERF_EVENT_IOC_PERIOD:
2765 return perf_event_period(event, (u64 __user *)arg);
2766
2767 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002768 {
2769 struct perf_event *output_event = NULL;
2770 int fput_needed = 0;
2771 int ret;
2772
2773 if (arg != -1) {
2774 output_event = perf_fget_light(arg, &fput_needed);
2775 if (IS_ERR(output_event))
2776 return PTR_ERR(output_event);
2777 }
2778
2779 ret = perf_event_set_output(event, output_event);
2780 if (output_event)
2781 fput_light(output_event->filp, fput_needed);
2782
2783 return ret;
2784 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002785
Li Zefan6fb29152009-10-15 11:21:42 +08002786 case PERF_EVENT_IOC_SET_FILTER:
2787 return perf_event_set_filter(event, (void __user *)arg);
2788
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002789 default:
2790 return -ENOTTY;
2791 }
2792
2793 if (flags & PERF_IOC_FLAG_GROUP)
2794 perf_event_for_each(event, func);
2795 else
2796 perf_event_for_each_child(event, func);
2797
2798 return 0;
2799}
2800
2801int perf_event_task_enable(void)
2802{
2803 struct perf_event *event;
2804
2805 mutex_lock(&current->perf_event_mutex);
2806 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2807 perf_event_for_each_child(event, perf_event_enable);
2808 mutex_unlock(&current->perf_event_mutex);
2809
2810 return 0;
2811}
2812
2813int perf_event_task_disable(void)
2814{
2815 struct perf_event *event;
2816
2817 mutex_lock(&current->perf_event_mutex);
2818 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2819 perf_event_for_each_child(event, perf_event_disable);
2820 mutex_unlock(&current->perf_event_mutex);
2821
2822 return 0;
2823}
2824
2825#ifndef PERF_EVENT_INDEX_OFFSET
2826# define PERF_EVENT_INDEX_OFFSET 0
2827#endif
2828
2829static int perf_event_index(struct perf_event *event)
2830{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002831 if (event->hw.state & PERF_HES_STOPPED)
2832 return 0;
2833
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002834 if (event->state != PERF_EVENT_STATE_ACTIVE)
2835 return 0;
2836
2837 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2838}
2839
2840/*
2841 * Callers need to ensure there can be no nesting of this function; otherwise
2842 * the seqlock logic goes bad. We cannot serialize this because the arch
2843 * code calls this from NMI context.
2844 */
2845void perf_event_update_userpage(struct perf_event *event)
2846{
2847 struct perf_event_mmap_page *userpg;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002848 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002849
2850 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002851 buffer = rcu_dereference(event->buffer);
2852 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002853 goto unlock;
2854
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002855 userpg = buffer->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002856
2857 /*
2858 * Disable preemption so as not to let the corresponding user-space
2859 * spin too long if we get preempted.
2860 */
2861 preempt_disable();
2862 ++userpg->lock;
2863 barrier();
2864 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002865 userpg->offset = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002866 if (event->state == PERF_EVENT_STATE_ACTIVE)
Peter Zijlstrae7850592010-05-21 14:43:08 +02002867 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002868
2869 userpg->time_enabled = event->total_time_enabled +
2870 atomic64_read(&event->child_total_time_enabled);
2871
2872 userpg->time_running = event->total_time_running +
2873 atomic64_read(&event->child_total_time_running);
2874
2875 barrier();
2876 ++userpg->lock;
2877 preempt_enable();
2878unlock:
2879 rcu_read_unlock();
2880}
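
/*
 * Illustrative user-space counterpart (a sketch, not part of this
 * file): read the self-monitoring fields consistently by treating
 * ->lock as a seqcount, mirroring the two increments above. 'pg'
 * points at the mmap()ed perf_event_mmap_page:
 *
 *	u32 seq;
 *	u64 index, offset;
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		index = pg->index;
 *		offset = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq);
 */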
2881
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002882static unsigned long perf_data_size(struct perf_buffer *buffer);
2883
2884static void
2885perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2886{
2887 long max_size = perf_data_size(buffer);
2888
2889 if (watermark)
2890 buffer->watermark = min(max_size, watermark);
2891
2892 if (!buffer->watermark)
2893 buffer->watermark = max_size / 2;
2894
2895 if (flags & PERF_BUFFER_WRITABLE)
2896 buffer->writable = 1;
2897
2898 atomic_set(&buffer->refcount, 1);
2899}
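
/*
 * Worked example (illustrative): a 16-page data area on 4KiB pages is
 * 64KiB; with no explicit wakeup_watermark the default is max_size / 2,
 * so perf_output_begin() arms a wakeup roughly every 32KiB written.
 */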
2900
Peter Zijlstra906010b2009-09-21 16:08:49 +02002901#ifndef CONFIG_PERF_USE_VMALLOC
2902
2903/*
2904 * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
2905 */
2906
2907static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002908perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002909{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002910 if (pgoff > buffer->nr_pages)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002911 return NULL;
2912
2913 if (pgoff == 0)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002914 return virt_to_page(buffer->user_page);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002915
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002916 return virt_to_page(buffer->data_pages[pgoff - 1]);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002917}
2918
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002919static void *perf_mmap_alloc_page(int cpu)
2920{
2921 struct page *page;
2922 int node;
2923
2924 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2925 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2926 if (!page)
2927 return NULL;
2928
2929 return page_address(page);
2930}
2931
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002932static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002933perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002934{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002935 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002936 unsigned long size;
2937 int i;
2938
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002939 size = sizeof(struct perf_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002940 size += nr_pages * sizeof(void *);
2941
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002942 buffer = kzalloc(size, GFP_KERNEL);
2943 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002944 goto fail;
2945
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002946 buffer->user_page = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002947 if (!buffer->user_page)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002948 goto fail_user_page;
2949
2950 for (i = 0; i < nr_pages; i++) {
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002951 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002952 if (!buffer->data_pages[i])
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002953 goto fail_data_pages;
2954 }
2955
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002956 buffer->nr_pages = nr_pages;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002957
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002958 perf_buffer_init(buffer, watermark, flags);
2959
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002960 return buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002961
2962fail_data_pages:
2963 for (i--; i >= 0; i--)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002964 free_page((unsigned long)buffer->data_pages[i]);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002965
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002966 free_page((unsigned long)buffer->user_page);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002967
2968fail_user_page:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002969 kfree(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002970
2971fail:
Peter Zijlstra906010b2009-09-21 16:08:49 +02002972 return NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002973}
2974
2975static void perf_mmap_free_page(unsigned long addr)
2976{
2977 struct page *page = virt_to_page((void *)addr);
2978
2979 page->mapping = NULL;
2980 __free_page(page);
2981}
2982
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002983static void perf_buffer_free(struct perf_buffer *buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002984{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002985 int i;
2986
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002987 perf_mmap_free_page((unsigned long)buffer->user_page);
2988 for (i = 0; i < buffer->nr_pages; i++)
2989 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2990 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002991}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002992
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002993static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002994{
2995 return 0;
2996}
2997
Peter Zijlstra906010b2009-09-21 16:08:49 +02002998#else
2999
3000/*
3001 * Back perf_mmap() with vmalloc memory.
3002 *
3003 * Required for architectures that have d-cache aliasing issues.
3004 */
3005
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003006static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003007{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003008 return buffer->page_order;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003009}
3010
Peter Zijlstra906010b2009-09-21 16:08:49 +02003011static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003012perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003013{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003014 if (pgoff > (1UL << page_order(buffer)))
Peter Zijlstra906010b2009-09-21 16:08:49 +02003015 return NULL;
3016
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003017 return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003018}
3019
3020static void perf_mmap_unmark_page(void *addr)
3021{
3022 struct page *page = vmalloc_to_page(addr);
3023
3024 page->mapping = NULL;
3025}
3026
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003027static void perf_buffer_free_work(struct work_struct *work)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003028{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003029 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003030 void *base;
3031 int i, nr;
3032
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003033 buffer = container_of(work, struct perf_buffer, work);
3034 nr = 1 << page_order(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003035
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003036 base = buffer->user_page;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003037 for (i = 0; i < nr + 1; i++)
3038 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
3039
3040 vfree(base);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003041 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003042}
3043
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003044static void perf_buffer_free(struct perf_buffer *buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003045{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003046 schedule_work(&buffer->work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003047}
3048
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003049static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003050perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003051{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003052 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003053 unsigned long size;
3054 void *all_buf;
3055
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003056 size = sizeof(struct perf_buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003057 size += sizeof(void *);
3058
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003059 buffer = kzalloc(size, GFP_KERNEL);
3060 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003061 goto fail;
3062
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003063 INIT_WORK(&buffer->work, perf_buffer_free_work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003064
3065 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
3066 if (!all_buf)
3067 goto fail_all_buf;
3068
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003069 buffer->user_page = all_buf;
3070 buffer->data_pages[0] = all_buf + PAGE_SIZE;
3071 buffer->page_order = ilog2(nr_pages);
3072 buffer->nr_pages = 1;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003073
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003074 perf_buffer_init(buffer, watermark, flags);
3075
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003076 return buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003077
3078fail_all_buf:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003079 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003080
3081fail:
3082 return NULL;
3083}
3084
3085#endif
3086
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003087static unsigned long perf_data_size(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003088{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003089 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003090}
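
/*
 * Worked example (illustrative): with the page-based allocator,
 * page_order() is 0, so 8 data pages of 4KiB give 8 << 12 = 32KiB.
 * With the vmalloc variant the same request yields nr_pages == 1 and
 * page_order() == 3, so 1 << (12 + 3) is the identical 32KiB.
 */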
3091
Peter Zijlstra906010b2009-09-21 16:08:49 +02003092static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3093{
3094 struct perf_event *event = vma->vm_file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003095 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003096 int ret = VM_FAULT_SIGBUS;
3097
3098 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3099 if (vmf->pgoff == 0)
3100 ret = 0;
3101 return ret;
3102 }
3103
3104 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003105 buffer = rcu_dereference(event->buffer);
3106 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003107 goto unlock;
3108
3109 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3110 goto unlock;
3111
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003112 vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003113 if (!vmf->page)
3114 goto unlock;
3115
3116 get_page(vmf->page);
3117 vmf->page->mapping = vma->vm_file->f_mapping;
3118 vmf->page->index = vmf->pgoff;
3119
3120 ret = 0;
3121unlock:
3122 rcu_read_unlock();
3123
3124 return ret;
3125}
3126
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003127static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003128{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003129 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003130
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003131 buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
3132 perf_buffer_free(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003133}
3134
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003135static struct perf_buffer *perf_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003136{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003137 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003138
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003139 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003140 buffer = rcu_dereference(event->buffer);
3141 if (buffer) {
3142 if (!atomic_inc_not_zero(&buffer->refcount))
3143 buffer = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003144 }
3145 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003146
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003147 return buffer;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003148}
3149
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003150static void perf_buffer_put(struct perf_buffer *buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003151{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003152 if (!atomic_dec_and_test(&buffer->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003153 return;
3154
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003155 call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003156}
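
/*
 * Typical pairing (a sketch): take a reference under RCU, use the
 * buffer, then drop it; the final put defers the free to RCU so
 * concurrent lock-free readers stay safe:
 *
 *	buffer = perf_buffer_get(event);
 *	if (buffer) {
 *		... read or write buffer state ...
 *		perf_buffer_put(buffer);
 *	}
 */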
3157
3158static void perf_mmap_open(struct vm_area_struct *vma)
3159{
3160 struct perf_event *event = vma->vm_file->private_data;
3161
3162 atomic_inc(&event->mmap_count);
3163}
3164
3165static void perf_mmap_close(struct vm_area_struct *vma)
3166{
3167 struct perf_event *event = vma->vm_file->private_data;
3168
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003169 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003170 unsigned long size = perf_data_size(event->buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003171 struct user_struct *user = event->mmap_user;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003172 struct perf_buffer *buffer = event->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003173
Peter Zijlstra906010b2009-09-21 16:08:49 +02003174 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003175 vma->vm_mm->locked_vm -= event->mmap_locked;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003176 rcu_assign_pointer(event->buffer, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003177 mutex_unlock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003178
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003179 perf_buffer_put(buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003180 free_uid(user);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003181 }
3182}
3183
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003184static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003185 .open = perf_mmap_open,
3186 .close = perf_mmap_close,
3187 .fault = perf_mmap_fault,
3188 .page_mkwrite = perf_mmap_fault,
3189};
3190
3191static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3192{
3193 struct perf_event *event = file->private_data;
3194 unsigned long user_locked, user_lock_limit;
3195 struct user_struct *user = current_user();
3196 unsigned long locked, lock_limit;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003197 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003198 unsigned long vma_size;
3199 unsigned long nr_pages;
3200 long user_extra, extra;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003201 int ret = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003202
Peter Zijlstrac7920612010-05-18 10:33:24 +02003203 /*
3204 * Don't allow mmap() of inherited per-task counters. This would
3205 * create a performance issue due to all children writing to the
3206 * same buffer.
3207 */
3208 if (event->cpu == -1 && event->attr.inherit)
3209 return -EINVAL;
3210
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003211 if (!(vma->vm_flags & VM_SHARED))
3212 return -EINVAL;
3213
3214 vma_size = vma->vm_end - vma->vm_start;
3215 nr_pages = (vma_size / PAGE_SIZE) - 1;
3216
3217 /*
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003218 * If we have buffer pages, ensure they're a power-of-two number so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003219 * can use bitmasks instead of modulo.
3220 */
3221 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3222 return -EINVAL;
3223
3224 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3225 return -EINVAL;
3226
3227 if (vma->vm_pgoff != 0)
3228 return -EINVAL;
3229
3230 WARN_ON_ONCE(event->ctx->parent_ctx);
3231 mutex_lock(&event->mmap_mutex);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003232 if (event->buffer) {
3233 if (event->buffer->nr_pages == nr_pages)
3234 atomic_inc(&event->buffer->refcount);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003235 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003236 ret = -EINVAL;
3237 goto unlock;
3238 }
3239
3240 user_extra = nr_pages + 1;
3241 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3242
3243 /*
3244 * Increase the limit linearly with more CPUs:
3245 */
3246 user_lock_limit *= num_online_cpus();
3247
3248 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3249
3250 extra = 0;
3251 if (user_locked > user_lock_limit)
3252 extra = user_locked - user_lock_limit;
3253
Jiri Slaby78d7d402010-03-05 13:42:54 -08003254 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003255 lock_limit >>= PAGE_SHIFT;
3256 locked = vma->vm_mm->locked_vm + extra;
3257
3258 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3259 !capable(CAP_IPC_LOCK)) {
3260 ret = -EPERM;
3261 goto unlock;
3262 }
3263
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003264 WARN_ON(event->buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003265
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003266 if (vma->vm_flags & VM_WRITE)
3267 flags |= PERF_BUFFER_WRITABLE;
3268
3269 buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
3270 event->cpu, flags);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003271 if (!buffer) {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003272 ret = -ENOMEM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003273 goto unlock;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003274 }
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003275 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003276
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003277 atomic_long_add(user_extra, &user->locked_vm);
3278 event->mmap_locked = extra;
3279 event->mmap_user = get_current_user();
3280 vma->vm_mm->locked_vm += event->mmap_locked;
3281
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003282unlock:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003283 if (!ret)
3284 atomic_inc(&event->mmap_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003285 mutex_unlock(&event->mmap_mutex);
3286
3287 vma->vm_flags |= VM_RESERVED;
3288 vma->vm_ops = &perf_mmap_vmops;
3289
3290 return ret;
3291}
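
/*
 * Illustrative user-space usage (a sketch, not part of this file):
 * the mapping must be one control page plus a power-of-two number of
 * data pages, MAP_SHARED, at offset 0:
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	size_t len = (1 + 8) * page_size;	(8 data pages)
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */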
3292
3293static int perf_fasync(int fd, struct file *filp, int on)
3294{
3295 struct inode *inode = filp->f_path.dentry->d_inode;
3296 struct perf_event *event = filp->private_data;
3297 int retval;
3298
3299 mutex_lock(&inode->i_mutex);
3300 retval = fasync_helper(fd, filp, on, &event->fasync);
3301 mutex_unlock(&inode->i_mutex);
3302
3303 if (retval < 0)
3304 return retval;
3305
3306 return 0;
3307}
3308
3309static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01003310 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003311 .release = perf_release,
3312 .read = perf_read,
3313 .poll = perf_poll,
3314 .unlocked_ioctl = perf_ioctl,
3315 .compat_ioctl = perf_ioctl,
3316 .mmap = perf_mmap,
3317 .fasync = perf_fasync,
3318};
3319
3320/*
3321 * Perf event wakeup
3322 *
3323 * If there's data, ensure we set the poll() state and publish everything
3324 * to user-space before waking everybody up.
3325 */
3326
3327void perf_event_wakeup(struct perf_event *event)
3328{
3329 wake_up_all(&event->waitq);
3330
3331 if (event->pending_kill) {
3332 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3333 event->pending_kill = 0;
3334 }
3335}
3336
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003337static void perf_pending_event(struct irq_work *entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003338{
3339 struct perf_event *event = container_of(entry,
3340 struct perf_event, pending);
3341
3342 if (event->pending_disable) {
3343 event->pending_disable = 0;
3344 __perf_event_disable(event);
3345 }
3346
3347 if (event->pending_wakeup) {
3348 event->pending_wakeup = 0;
3349 perf_event_wakeup(event);
3350 }
3351}
3352
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003353/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08003354 * We assume KVM is the only hypervisor supporting these callbacks.
3355 * Later on, this could become a list if another virtualization
3356 * implementation needs to support them as well.
3357 */
3358struct perf_guest_info_callbacks *perf_guest_cbs;
3359
3360int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3361{
3362 perf_guest_cbs = cbs;
3363 return 0;
3364}
3365EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3366
3367int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3368{
3369 perf_guest_cbs = NULL;
3370 return 0;
3371}
3372EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
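
/*
 * Illustrative registration from a hypervisor module (a sketch; the
 * callback function names here are hypothetical):
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 */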
3373
3374/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003375 * Output
3376 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003377static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003378 unsigned long offset, unsigned long head)
3379{
3380 unsigned long mask;
3381
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003382 if (!buffer->writable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003383 return true;
3384
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003385 mask = perf_data_size(buffer) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003386
3387 offset = (offset - tail) & mask;
3388 head = (head - tail) & mask;
3389
3390 if ((int)(head - offset) < 0)
3391 return false;
3392
3393 return true;
3394}
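
/*
 * Worked example (illustrative): with a 64KiB data area, mask is
 * 0xffff. For tail = 0x1f000 and offset = 0x20000, a write ending at
 * head = 0x21800 gives (offset - tail) & mask = 0x1000 and
 * (head - tail) & mask = 0x2800: head - offset = 0x1800 >= 0, so it
 * fits. A write ending at head = 0x2f800 gives 0x0800 - 0x1000 < 0,
 * meaning it would overwrite data the consumer has not read yet.
 */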
3395
3396static void perf_output_wakeup(struct perf_output_handle *handle)
3397{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003398 atomic_set(&handle->buffer->poll, POLL_IN);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003399
3400 if (handle->nmi) {
3401 handle->event->pending_wakeup = 1;
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003402 irq_work_queue(&handle->event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003403 } else
3404 perf_event_wakeup(handle->event);
3405}
3406
3407/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003408 * We need to ensure a later event doesn't publish a head when a former
Peter Zijlstraef607772010-05-18 10:50:41 +02003409 * event isn't done writing. However, since we need to deal with NMIs we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003410 * cannot fully serialize things.
3411 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003412 * We only publish the head (and generate a wakeup) when the outer-most
Peter Zijlstraef607772010-05-18 10:50:41 +02003413 * event completes.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003414 */
Peter Zijlstraef607772010-05-18 10:50:41 +02003415static void perf_output_get_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003416{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003417 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003418
Peter Zijlstraef607772010-05-18 10:50:41 +02003419 preempt_disable();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003420 local_inc(&buffer->nest);
3421 handle->wakeup = local_read(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003422}
3423
Peter Zijlstraef607772010-05-18 10:50:41 +02003424static void perf_output_put_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003425{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003426 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003427 unsigned long head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003428
3429again:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003430 head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003431
3432 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003433 * IRQ/NMI can happen here, which means we can miss a head update.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003434 */
3435
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003436 if (!local_dec_and_test(&buffer->nest))
Frederic Weisbeckeracd35a42010-05-20 21:28:34 +02003437 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003438
3439 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003440 * Publish the known good head. Rely on the full barrier implied
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003441 * by atomic_dec_and_test() to order the buffer->head read and this
Peter Zijlstraef607772010-05-18 10:50:41 +02003442 * write.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003443 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003444 buffer->user_page->data_head = head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003445
Peter Zijlstraef607772010-05-18 10:50:41 +02003446 /*
3447 * Now check if we missed an update; rely on the (compiler)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003448 * barrier in atomic_dec_and_test() to re-read buffer->head.
Peter Zijlstraef607772010-05-18 10:50:41 +02003449 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003450 if (unlikely(head != local_read(&buffer->head))) {
3451 local_inc(&buffer->nest);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003452 goto again;
3453 }
3454
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003455 if (handle->wakeup != local_read(&buffer->wakeup))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003456 perf_output_wakeup(handle);
Peter Zijlstraef607772010-05-18 10:50:41 +02003457
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003458out:
Peter Zijlstraef607772010-05-18 10:50:41 +02003459 preempt_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003460}
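
/*
 * Illustrative interleaving (a sketch): a normal write is interrupted
 * by an NMI that also writes a record:
 *
 *	perf_output_get_handle(A)		nest == 1
 *	  NMI: perf_output_get_handle(B)	nest == 2
 *	  NMI: perf_output_put_handle(B)	nest == 1, no publish
 *	perf_output_put_handle(A)		nest == 0, publishes a
 *						head covering A and B
 */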
3461
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003462__always_inline void perf_output_copy(struct perf_output_handle *handle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003463 const void *buf, unsigned int len)
3464{
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003465 do {
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003466 unsigned long size = min_t(unsigned long, handle->size, len);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003467
3468 memcpy(handle->addr, buf, size);
3469
3470 len -= size;
3471 handle->addr += size;
Frederic Weisbecker74048f82010-05-27 21:34:58 +02003472 buf += size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003473 handle->size -= size;
3474 if (!handle->size) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003475 struct perf_buffer *buffer = handle->buffer;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003476
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003477 handle->page++;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003478 handle->page &= buffer->nr_pages - 1;
3479 handle->addr = buffer->data_pages[handle->page];
3480 handle->size = PAGE_SIZE << page_order(buffer);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003481 }
3482 } while (len);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003483}
3484
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003485static void __perf_event_header__init_id(struct perf_event_header *header,
3486 struct perf_sample_data *data,
3487 struct perf_event *event)
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02003488{
3489 u64 sample_type = event->attr.sample_type;
3490
3491 data->type = sample_type;
3492 header->size += event->id_header_size;
3493
3494 if (sample_type & PERF_SAMPLE_TID) {
3495 /* namespace issues */
3496 data->tid_entry.pid = perf_event_pid(event, current);
3497 data->tid_entry.tid = perf_event_tid(event, current);
3498 }
3499
3500 if (sample_type & PERF_SAMPLE_TIME)
3501 data->time = perf_clock();
3502
3503 if (sample_type & PERF_SAMPLE_ID)
3504 data->id = primary_event_id(event);
3505
3506 if (sample_type & PERF_SAMPLE_STREAM_ID)
3507 data->stream_id = event->id;
3508
3509 if (sample_type & PERF_SAMPLE_CPU) {
3510 data->cpu_entry.cpu = raw_smp_processor_id();
3511 data->cpu_entry.reserved = 0;
3512 }
3513}
3514
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003515static void perf_event_header__init_id(struct perf_event_header *header,
3516 struct perf_sample_data *data,
3517 struct perf_event *event)
3518{
3519 if (event->attr.sample_id_all)
3520 __perf_event_header__init_id(header, data, event);
3521}
3522
3523static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3524 struct perf_sample_data *data)
3525{
3526 u64 sample_type = data->type;
3527
3528 if (sample_type & PERF_SAMPLE_TID)
3529 perf_output_put(handle, data->tid_entry);
3530
3531 if (sample_type & PERF_SAMPLE_TIME)
3532 perf_output_put(handle, data->time);
3533
3534 if (sample_type & PERF_SAMPLE_ID)
3535 perf_output_put(handle, data->id);
3536
3537 if (sample_type & PERF_SAMPLE_STREAM_ID)
3538 perf_output_put(handle, data->stream_id);
3539
3540 if (sample_type & PERF_SAMPLE_CPU)
3541 perf_output_put(handle, data->cpu_entry);
3542}
3543
3544static void perf_event__output_id_sample(struct perf_event *event,
3545 struct perf_output_handle *handle,
3546 struct perf_sample_data *sample)
3547{
3548 if (event->attr.sample_id_all)
3549 __perf_event__output_id_sample(handle, sample);
3550}
3551
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003552int perf_output_begin(struct perf_output_handle *handle,
3553 struct perf_event *event, unsigned int size,
3554 int nmi, int sample)
3555{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003556 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003557 unsigned long tail, offset, head;
3558 int have_lost;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003559 struct perf_sample_data sample_data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003560 struct {
3561 struct perf_event_header header;
3562 u64 id;
3563 u64 lost;
3564 } lost_event;
3565
3566 rcu_read_lock();
3567 /*
3568 * For inherited events we send all the output towards the parent.
3569 */
3570 if (event->parent)
3571 event = event->parent;
3572
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003573 buffer = rcu_dereference(event->buffer);
3574 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003575 goto out;
3576
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003577 handle->buffer = buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003578 handle->event = event;
3579 handle->nmi = nmi;
3580 handle->sample = sample;
3581
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003582 if (!buffer->nr_pages)
Stephane Eranian00d1d0b2010-05-17 12:46:01 +02003583 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003584
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003585 have_lost = local_read(&buffer->lost);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003586 if (have_lost) {
3587 lost_event.header.size = sizeof(lost_event);
3588 perf_event_header__init_id(&lost_event.header, &sample_data,
3589 event);
3590 size += lost_event.header.size;
3591 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003592
Peter Zijlstraef607772010-05-18 10:50:41 +02003593 perf_output_get_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003594
3595 do {
3596 /*
3597 * Userspace could choose to issue a mb() before updating the
3598 * tail pointer, so that all reads will be completed before the
3599 * write is issued.
3600 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003601 tail = ACCESS_ONCE(buffer->user_page->data_tail);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003602 smp_rmb();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003603 offset = head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003604 head += size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003605 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003606 goto fail;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003607 } while (local_cmpxchg(&buffer->head, offset, head) != offset);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003608
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003609 if (head - local_read(&buffer->wakeup) > buffer->watermark)
3610 local_add(buffer->watermark, &buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003611
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003612 handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3613 handle->page &= buffer->nr_pages - 1;
3614 handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3615 handle->addr = buffer->data_pages[handle->page];
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003616 handle->addr += handle->size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003617 handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003618
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003619 if (have_lost) {
3620 lost_event.header.type = PERF_RECORD_LOST;
3621 lost_event.header.misc = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003622 lost_event.id = event->id;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003623 lost_event.lost = local_xchg(&buffer->lost, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003624
3625 perf_output_put(handle, lost_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003626 perf_event__output_id_sample(event, handle, &sample_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003627 }
3628
3629 return 0;
3630
3631fail:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003632 local_inc(&buffer->lost);
Peter Zijlstraef607772010-05-18 10:50:41 +02003633 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003634out:
3635 rcu_read_unlock();
3636
3637 return -ENOSPC;
3638}
3639
3640void perf_output_end(struct perf_output_handle *handle)
3641{
3642 struct perf_event *event = handle->event;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003643 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003644
3645 int wakeup_events = event->attr.wakeup_events;
3646
3647 if (handle->sample && wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003648 int events = local_inc_return(&buffer->events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003649 if (events >= wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003650 local_sub(wakeup_events, &buffer->events);
3651 local_inc(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003652 }
3653 }
3654
Peter Zijlstraef607772010-05-18 10:50:41 +02003655 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003656 rcu_read_unlock();
3657}
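
/*
 * Typical record emission with the helpers above (a sketch; 'rec' is
 * a hypothetical fixed-size record with a filled-in header):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, rec.header.size, nmi, 0))
 *		return;
 *	perf_output_put(&handle, rec);
 *	perf_output_end(&handle);
 */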
3658
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003659static void perf_output_read_one(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003660 struct perf_event *event,
3661 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003662{
3663 u64 read_format = event->attr.read_format;
3664 u64 values[4];
3665 int n = 0;
3666
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003667 values[n++] = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003668 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003669 values[n++] = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003670 atomic64_read(&event->child_total_time_enabled);
3671 }
3672 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003673 values[n++] = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003674 atomic64_read(&event->child_total_time_running);
3675 }
3676 if (read_format & PERF_FORMAT_ID)
3677 values[n++] = primary_event_id(event);
3678
3679 perf_output_copy(handle, values, n * sizeof(u64));
3680}
3681
3682/*
3683 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3684 */
3685static void perf_output_read_group(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003686 struct perf_event *event,
3687 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003688{
3689 struct perf_event *leader = event->group_leader, *sub;
3690 u64 read_format = event->attr.read_format;
3691 u64 values[5];
3692 int n = 0;
3693
3694 values[n++] = 1 + leader->nr_siblings;
3695
3696 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
Stephane Eranianeed01522010-10-26 16:08:01 +02003697 values[n++] = enabled;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003698
3699 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
Stephane Eranianeed01522010-10-26 16:08:01 +02003700 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003701
3702 if (leader != event)
3703 leader->pmu->read(leader);
3704
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003705 values[n++] = perf_event_count(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003706 if (read_format & PERF_FORMAT_ID)
3707 values[n++] = primary_event_id(leader);
3708
3709 perf_output_copy(handle, values, n * sizeof(u64));
3710
3711 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3712 n = 0;
3713
3714 if (sub != event)
3715 sub->pmu->read(sub);
3716
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003717 values[n++] = perf_event_count(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003718 if (read_format & PERF_FORMAT_ID)
3719 values[n++] = primary_event_id(sub);
3720
3721 perf_output_copy(handle, values, n * sizeof(u64));
3722 }
3723}
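
/*
 * Resulting record body for PERF_FORMAT_GROUP (illustrative), with
 * both time fields and PERF_FORMAT_ID requested:
 *
 *	u64 nr;			(1 + nr_siblings)
 *	u64 time_enabled;
 *	u64 time_running;
 *	{ u64 value; u64 id; }	(group leader)
 *	{ u64 value; u64 id; }	(one entry per sibling)
 */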
3724
Stephane Eranianeed01522010-10-26 16:08:01 +02003725#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3726 PERF_FORMAT_TOTAL_TIME_RUNNING)
3727
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003728static void perf_output_read(struct perf_output_handle *handle,
3729 struct perf_event *event)
3730{
Stephane Eranianeed01522010-10-26 16:08:01 +02003731 u64 enabled = 0, running = 0, now, ctx_time;
3732 u64 read_format = event->attr.read_format;
3733
3734 /*
3735 * compute total_time_enabled, total_time_running
3736 * based on snapshot values taken when the event
3737 * was last scheduled in.
3738 *
3739 * we cannot simply call update_context_time()
3740 * because of locking issues, as we are called in
3741 * NMI context
3742 */
3743 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3744 now = perf_clock();
3745 ctx_time = event->shadow_ctx_time + now;
3746 enabled = ctx_time - event->tstamp_enabled;
3747 running = ctx_time - event->tstamp_running;
3748 }
3749
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003750 if (event->attr.read_format & PERF_FORMAT_GROUP)
Stephane Eranianeed01522010-10-26 16:08:01 +02003751 perf_output_read_group(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003752 else
Stephane Eranianeed01522010-10-26 16:08:01 +02003753 perf_output_read_one(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003754}
3755
3756void perf_output_sample(struct perf_output_handle *handle,
3757 struct perf_event_header *header,
3758 struct perf_sample_data *data,
3759 struct perf_event *event)
3760{
3761 u64 sample_type = data->type;
3762
3763 perf_output_put(handle, *header);
3764
3765 if (sample_type & PERF_SAMPLE_IP)
3766 perf_output_put(handle, data->ip);
3767
3768 if (sample_type & PERF_SAMPLE_TID)
3769 perf_output_put(handle, data->tid_entry);
3770
3771 if (sample_type & PERF_SAMPLE_TIME)
3772 perf_output_put(handle, data->time);
3773
3774 if (sample_type & PERF_SAMPLE_ADDR)
3775 perf_output_put(handle, data->addr);
3776
3777 if (sample_type & PERF_SAMPLE_ID)
3778 perf_output_put(handle, data->id);
3779
3780 if (sample_type & PERF_SAMPLE_STREAM_ID)
3781 perf_output_put(handle, data->stream_id);
3782
3783 if (sample_type & PERF_SAMPLE_CPU)
3784 perf_output_put(handle, data->cpu_entry);
3785
3786 if (sample_type & PERF_SAMPLE_PERIOD)
3787 perf_output_put(handle, data->period);
3788
3789 if (sample_type & PERF_SAMPLE_READ)
3790 perf_output_read(handle, event);
3791
3792 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3793 if (data->callchain) {
3794 int size = 1;
3795
3796 size += data->callchain->nr;
3798
3799 size *= sizeof(u64);
3800
3801 perf_output_copy(handle, data->callchain, size);
3802 } else {
3803 u64 nr = 0;
3804 perf_output_put(handle, nr);
3805 }
3806 }
3807
3808 if (sample_type & PERF_SAMPLE_RAW) {
3809 if (data->raw) {
3810 perf_output_put(handle, data->raw->size);
3811 perf_output_copy(handle, data->raw->data,
3812 data->raw->size);
3813 } else {
3814 struct {
3815 u32 size;
3816 u32 data;
3817 } raw = {
3818 .size = sizeof(u32),
3819 .data = 0,
3820 };
3821 perf_output_put(handle, raw);
3822 }
3823 }
3824}
3825
3826void perf_prepare_sample(struct perf_event_header *header,
3827 struct perf_sample_data *data,
3828 struct perf_event *event,
3829 struct pt_regs *regs)
3830{
3831 u64 sample_type = event->attr.sample_type;
3832
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003833 header->type = PERF_RECORD_SAMPLE;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003834 header->size = sizeof(*header) + event->header_size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003835
3836 header->misc = 0;
3837 header->misc |= perf_misc_flags(regs);
3838
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003839 __perf_event_header__init_id(header, data, event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02003840
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003841 if (sample_type & PERF_SAMPLE_IP)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003842 data->ip = perf_instruction_pointer(regs);
3843
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003844 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3845 int size = 1;
3846
3847 data->callchain = perf_callchain(regs);
3848
3849 if (data->callchain)
3850 size += data->callchain->nr;
3851
3852 header->size += size * sizeof(u64);
3853 }
3854
3855 if (sample_type & PERF_SAMPLE_RAW) {
3856 int size = sizeof(u32);
3857
3858 if (data->raw)
3859 size += data->raw->size;
3860 else
3861 size += sizeof(u32);
3862
3863 WARN_ON_ONCE(size & (sizeof(u64)-1));
3864 header->size += size;
3865 }
3866}
3867
3868static void perf_event_output(struct perf_event *event, int nmi,
3869 struct perf_sample_data *data,
3870 struct pt_regs *regs)
3871{
3872 struct perf_output_handle handle;
3873 struct perf_event_header header;
3874
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003875 /* protect the callchain buffers */
3876 rcu_read_lock();
3877
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003878 perf_prepare_sample(&header, data, event, regs);
3879
3880 if (perf_output_begin(&handle, event, header.size, nmi, 1))
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003881 goto exit;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003882
3883 perf_output_sample(&handle, &header, data, event);
3884
3885 perf_output_end(&handle);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003886
3887exit:
3888 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003889}
3890
3891/*
3892 * read event_id
3893 */
3894
3895struct perf_read_event {
3896 struct perf_event_header header;
3897
3898 u32 pid;
3899 u32 tid;
3900};
3901
3902static void
3903perf_event_read_event(struct perf_event *event,
3904 struct task_struct *task)
3905{
3906 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003907 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003908 struct perf_read_event read_event = {
3909 .header = {
3910 .type = PERF_RECORD_READ,
3911 .misc = 0,
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003912 .size = sizeof(read_event) + event->read_size,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003913 },
3914 .pid = perf_event_pid(event, task),
3915 .tid = perf_event_tid(event, task),
3916 };
3917 int ret;
3918
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003919 perf_event_header__init_id(&read_event.header, &sample, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003920 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3921 if (ret)
3922 return;
3923
3924 perf_output_put(&handle, read_event);
3925 perf_output_read(&handle, event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003926 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003927
3928 perf_output_end(&handle);
3929}
3930
3931/*
3932 * task tracking -- fork/exit
3933 *
Eric B Munson3af9e852010-05-18 15:30:49 +01003934 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003935 */
3936
3937struct perf_task_event {
3938 struct task_struct *task;
3939 struct perf_event_context *task_ctx;
3940
3941 struct {
3942 struct perf_event_header header;
3943
3944 u32 pid;
3945 u32 ppid;
3946 u32 tid;
3947 u32 ptid;
3948 u64 time;
3949 } event_id;
3950};
3951
3952static void perf_event_task_output(struct perf_event *event,
3953 struct perf_task_event *task_event)
3954{
3955 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003956 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003957 struct task_struct *task = task_event->task;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003958 int ret, size = task_event->event_id.header.size;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01003959
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003960 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003961
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003962 ret = perf_output_begin(&handle, event,
3963 task_event->event_id.header.size, 0, 0);
Peter Zijlstraef607772010-05-18 10:50:41 +02003964 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003965 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003966
3967 task_event->event_id.pid = perf_event_pid(event, task);
3968 task_event->event_id.ppid = perf_event_pid(event, current);
3969
3970 task_event->event_id.tid = perf_event_tid(event, task);
3971 task_event->event_id.ptid = perf_event_tid(event, current);
3972
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003973 perf_output_put(&handle, task_event->event_id);
3974
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003975 perf_event__output_id_sample(event, &handle, &sample);
3976
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003977 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003978out:
3979 task_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003980}
3981
3982static int perf_event_task_match(struct perf_event *event)
3983{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003984 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003985 return 0;
3986
Stephane Eranian5632ab12011-01-03 18:20:01 +02003987 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003988 return 0;
3989
Eric B Munson3af9e852010-05-18 15:30:49 +01003990 if (event->attr.comm || event->attr.mmap ||
3991 event->attr.mmap_data || event->attr.task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003992 return 1;
3993
3994 return 0;
3995}
3996
3997static void perf_event_task_ctx(struct perf_event_context *ctx,
3998 struct perf_task_event *task_event)
3999{
4000 struct perf_event *event;
4001
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004002 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4003 if (perf_event_task_match(event))
4004 perf_event_task_output(event, task_event);
4005 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004006}
4007
4008static void perf_event_task_event(struct perf_task_event *task_event)
4009{
4010 struct perf_cpu_context *cpuctx;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004011 struct perf_event_context *ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004012 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004013 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004014
Peter Zijlstrad6ff86c2009-11-20 22:19:46 +01004015 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004016 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004017 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004018 if (cpuctx->active_pmu != pmu)
4019 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004020 perf_event_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004021
4022 ctx = task_event->task_ctx;
4023 if (!ctx) {
4024 ctxn = pmu->task_ctx_nr;
4025 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004026 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004027 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4028 }
4029 if (ctx)
4030 perf_event_task_ctx(ctx, task_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02004031next:
4032 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004033 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004034 rcu_read_unlock();
4035}
4036
4037static void perf_event_task(struct task_struct *task,
4038 struct perf_event_context *task_ctx,
4039 int new)
4040{
4041 struct perf_task_event task_event;
4042
4043 if (!atomic_read(&nr_comm_events) &&
4044 !atomic_read(&nr_mmap_events) &&
4045 !atomic_read(&nr_task_events))
4046 return;
4047
4048 task_event = (struct perf_task_event){
4049 .task = task,
4050 .task_ctx = task_ctx,
4051 .event_id = {
4052 .header = {
4053 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4054 .misc = 0,
4055 .size = sizeof(task_event.event_id),
4056 },
4057 /* .pid */
4058 /* .ppid */
4059 /* .tid */
4060 /* .ptid */
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004061 .time = perf_clock(),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004062 },
4063 };
4064
4065 perf_event_task_event(&task_event);
4066}
4067
4068void perf_event_fork(struct task_struct *task)
4069{
4070 perf_event_task(task, NULL, 1);
4071}
4072
4073/*
4074 * comm tracking
4075 */
4076
4077struct perf_comm_event {
4078 struct task_struct *task;
4079 char *comm;
4080 int comm_size;
4081
4082 struct {
4083 struct perf_event_header header;
4084
4085 u32 pid;
4086 u32 tid;
4087 } event_id;
4088};
4089
4090static void perf_event_comm_output(struct perf_event *event,
4091 struct perf_comm_event *comm_event)
4092{
4093 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004094 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004095 int size = comm_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004096 int ret;
4097
4098 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4099 ret = perf_output_begin(&handle, event,
4100 comm_event->event_id.header.size, 0, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004101
4102 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004103 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004104
4105 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4106 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4107
4108 perf_output_put(&handle, comm_event->event_id);
4109 perf_output_copy(&handle, comm_event->comm,
4110 comm_event->comm_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004111
4112 perf_event__output_id_sample(event, &handle, &sample);
4113
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004114 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004115out:
4116 comm_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004117}
4118
4119static int perf_event_comm_match(struct perf_event *event)
4120{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004121 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004122 return 0;
4123
Stephane Eranian5632ab12011-01-03 18:20:01 +02004124 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004125 return 0;
4126
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004127 if (event->attr.comm)
4128 return 1;
4129
4130 return 0;
4131}
4132
4133static void perf_event_comm_ctx(struct perf_event_context *ctx,
4134 struct perf_comm_event *comm_event)
4135{
4136 struct perf_event *event;
4137
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004138 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4139 if (perf_event_comm_match(event))
4140 perf_event_comm_output(event, comm_event);
4141 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004142}
4143
4144static void perf_event_comm_event(struct perf_comm_event *comm_event)
4145{
4146 struct perf_cpu_context *cpuctx;
4147 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004148 char comm[TASK_COMM_LEN];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004149 unsigned int size;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004150 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004151 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004152
4153 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01004154 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004155 size = ALIGN(strlen(comm)+1, sizeof(u64));
4156
4157 comm_event->comm = comm;
4158 comm_event->comm_size = size;
4159
4160 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
Peter Zijlstraf6595f32009-11-20 22:19:47 +01004161 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004162 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004163 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004164 if (cpuctx->active_pmu != pmu)
4165 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004166 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004167
4168 ctxn = pmu->task_ctx_nr;
4169 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004170 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004171
4172 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4173 if (ctx)
4174 perf_event_comm_ctx(ctx, comm_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02004175next:
4176 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004177 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004178 rcu_read_unlock();
4179}
4180
void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size, 0, 0);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
			 mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
				 struct perf_mmap_event *mmap_event,
				 int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				struct perf_mmap_event *mmap_event,
				int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path() works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					    vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}

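/*
 * For reference, the PERF_RECORD_MMAP record emitted above is the event_id
 * struct followed by the 64-bit-aligned, NUL-padded file name.  A hedged
 * user-space sketch of consuming one such record from the mmap()ed ring
 * buffer (the perf_record_mmap struct is a local illustration, not a uapi
 * type from this file):
 *
 *	struct perf_record_mmap {
 *		struct perf_event_header header;
 *		__u32 pid, tid;
 *		__u64 start, len, pgoff;
 *		char  filename[];	// header.size minus the fields above
 *	};
 *
 *	static void handle_mmap(const struct perf_event_header *hdr)
 *	{
 *		const struct perf_record_mmap *m = (const void *)hdr;
 *
 *		if (hdr->type == PERF_RECORD_MMAP)
 *			printf("%u: %s @ %#llx+%#llx\n", m->pid, m->filename,
 *			       (unsigned long long)m->start,
 *			       (unsigned long long)m->len);
 *	}
 */
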
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size, 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				 int throttle, struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters; ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	if (!throttle) {
		hwc->interrupts++;
	} else {
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_event_sample_rate) {
				hwc->interrupts = MAX_INTERRUPTS;
				perf_log_throttle(event, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling the event even though we
			 * disabled it on the previous pass - just in case we
			 * raced with a sched-in and the event got enabled
			 * again:
			 */
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		if (nmi) {
			event->pending_disable = 1;
			irq_work_queue(&event->pending);
		} else
			perf_event_disable(event);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

	return ret;
}

int perf_event_overflow(struct perf_event *event, int nmi,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}

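/*
 * A worked example of the throttle test in __perf_event_overflow() above,
 * assuming HZ == 1000 and the default sysctl_perf_event_sample_rate of
 * 100000: the event gets throttled once it takes more than 100 PMIs
 * within one tick, since
 *
 *	HZ * hwc->interrupts > sample_rate
 *	1000 * 101           > 100000
 *
 * hwc->interrupts is reset, and the event unthrottled with a matching
 * PERF_RECORD_UNTHROTTLE, from the timer tick code elsewhere in this file.
 */
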
/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each context */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period counter
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}

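/*
 * A worked example of the arithmetic above, assuming a sample_period of
 * 100: software events add their count to period_left, so a positive
 * value means one or more periods have elapsed.  With period_left == 150:
 *
 *	nr     = (100 + 150) / 100 = 2		// two complete periods
 *	offset = 2 * 100           = 200
 *	val    = 150 - 200         = -50	// back into [-period, 0]
 *
 * and the two pending overflows are then reported one by one from
 * perf_swevent_overflow() below.
 */
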
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				  int nmi, struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					  data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       int nmi, struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
			      enum perf_type_id type,
			      u32 event_id,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release, which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
			     u64 nr, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, nmi, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);
	preempt_enable_notrace();
}

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
{
	struct swevent_hlist *hlist;

	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
	kfree(hlist);
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	jump_label_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		jump_label_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

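/*
 * The perf_swevent pmu above is what ultimately services PERF_TYPE_SOFTWARE
 * events opened from user space.  As a hedged illustration (error handling
 * omitted, not code from this file), counting page faults for the current
 * task could look like:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_SW_PAGE_FAULTS,
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	unsigned long long count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload ...
 *	read(fd, &count, sizeof(count));
 */
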
#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, 1, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, 0, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				 ns_to_ktime(period), 0,
				 HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(event->ctx);
		time = event->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - event->ctx->timestamp;
		time = event->ctx->time + delta;
	}

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
};

static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}

static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

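/*
 * The transaction hooks above batch hardware programming: group scheduling
 * brackets a series of ->add() calls between ->start_txn() and
 * ->commit_txn() (or ->cancel_txn() on failure).  A hedged sketch of the
 * calling pattern, with names simplified from the actual scheduling code:
 *
 *	pmu->start_txn(pmu);
 *	// for each event in the group:
 *	if (event->pmu->add(event, PERF_EF_START))
 *		goto group_error;
 *	if (!pmu->commit_txn(pmu))
 *		return 0;		// the whole group went on
 * group_error:
 *	// ... undo the events already added ...
 *	pmu->cancel_txn(pmu);
 */
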
/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
{
	struct pmu *pmu;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}

	return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->active_pmu == old_pmu)
			cpuctx->active_pmu = pmu;
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * Like a real lame refcount.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}

static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
	__ATTR_RO(type),
	__ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}

static struct lock_class_key cpuctx_mutex;

int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
		if (!err)
			goto free_pdc;

		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
		if (err) {
			ret = err;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->active_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use those to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}

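/*
 * As a hedged sketch of the API above (not code from this file): a minimal
 * software-style PMU only needs event_init and the add/del/start/stop/read
 * callbacks before calling perf_pmu_register(); passing type == -1 asks
 * the idr-based allocation above for a dynamic type number:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * The my_*() callbacks are hypothetical; once the pmu_bus is running, user
 * space can read the allocated type number back from
 * /sys/bus/event_source/devices/my_pmu/type.
 */
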
void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}

struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu)
		goto unlock;

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		int ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	if (!overflow_handler && parent_event)
		overflow_handler = parent_event->overflow_handler;

	event->overflow_handler	= overflow_handler;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * We currently do not support PERF_FORMAT_GROUP on inherited events.
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	event->pmu = pmu;

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_inc(&perf_task_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
	}

	return event;
}

5736static int perf_copy_attr(struct perf_event_attr __user *uattr,
5737 struct perf_event_attr *attr)
5738{
5739 u32 size;
5740 int ret;
5741
5742 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5743 return -EFAULT;
5744
5745 /*
5746 * zero the full structure, so that a short copy will be nice.
5747 */
5748 memset(attr, 0, sizeof(*attr));
5749
5750 ret = get_user(size, &uattr->size);
5751 if (ret)
5752 return ret;
5753
5754 if (size > PAGE_SIZE) /* silly large */
5755 goto err_size;
5756
5757 if (!size) /* abi compat */
5758 size = PERF_ATTR_SIZE_VER0;
5759
5760 if (size < PERF_ATTR_SIZE_VER0)
5761 goto err_size;
5762
5763 /*
5764 * If we're handed a bigger struct than we know of,
5765 * ensure all the unknown bits are 0 - i.e. new
5766 * user-space does not rely on any kernel feature
5767 * extensions we dont know about yet.
5768 */
5769 if (size > sizeof(*attr)) {
5770 unsigned char __user *addr;
5771 unsigned char __user *end;
5772 unsigned char val;
5773
5774 addr = (void __user *)uattr + sizeof(*attr);
5775 end = (void __user *)uattr + size;
5776
5777 for (; addr < end; addr++) {
5778 ret = get_user(val, addr);
5779 if (ret)
5780 return ret;
5781 if (val)
5782 goto err_size;
5783 }
5784 size = sizeof(*attr);
5785 }
5786
5787 ret = copy_from_user(attr, uattr, size);
5788 if (ret)
5789 return -EFAULT;
5790
5791 /*
5792 * If the type exists, the corresponding creation will verify
5793 * the attr->config.
5794 */
5795 if (attr->type >= PERF_TYPE_MAX)
5796 return -EINVAL;
5797
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05305798 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005799 return -EINVAL;
5800
5801 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5802 return -EINVAL;
5803
5804 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5805 return -EINVAL;
5806
5807out:
5808 return ret;
5809
5810err_size:
5811 put_user(sizeof(*attr), &uattr->size);
5812 ret = -E2BIG;
5813 goto out;
5814}
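/*
 * Illustrative sketch, not part of the original source: the user-space
 * side of the size handshake implemented by perf_copy_attr(). On -E2BIG
 * the err_size path above writes the kernel's sizeof(*attr) back into
 * uattr->size, so a caller built against a newer ABI can clear the
 * fields beyond that size and retry:
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	if (fd < 0 && errno == E2BIG) {
 *		-- attr.size now holds the largest size this kernel
 *		-- accepts; zero everything past it and call again
 *	}
 */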

static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct perf_buffer *buffer = NULL, *old_buffer = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu buffer, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the buffer we want to redirect to */
		buffer = perf_buffer_get(output_event);
		if (!buffer)
			goto unlock;
	}

	old_buffer = event->buffer;
	rcu_assign_pointer(event->buffer, buffer);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_buffer)
		perf_buffer_put(old_buffer);
out:
	return ret;
}
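/*
 * Illustrative sketch, not part of the original source: user-space
 * reaches perf_event_set_output() either by passing PERF_FLAG_FD_OUTPUT
 * to perf_event_open() (redirecting into the group leader's buffer) or
 * via the PERF_EVENT_IOC_SET_OUTPUT ioctl. Per the checks above, both
 * events must be on the same CPU (or in the same task context for
 * cpu == -1), and the redirected event must not have an active mmap():
 *
 *	leader = syscall(__NR_perf_event_open, &attr1, pid, cpu, -1, 0);
 *	other  = syscall(__NR_perf_event_open, &attr2, pid, cpu, -1, 0);
 *	ioctl(other, PERF_EVENT_IOC_SET_OUTPUT, leader);
 *
 * Samples from both events then land in the leader's ring buffer, so
 * only the leader's fd needs to be mmap()ed.
 */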

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr: event_id type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader event fd
 * @flags: perf event open flags
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_fd;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * The event and the group leader differ here, and
			 * the event is the software one, so the group
			 * leader is a hardware event.
			 *
			 * Allow the addition of software events to !software
			 * groups; this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group leader's file after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fput_light(group_file, fput_needed);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
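/*
 * Illustrative sketch, not part of the original source: a minimal
 * user-space consumer of the syscall above, counting instructions
 * retired in user mode by the calling thread. glibc provides no
 * wrapper, hence syscall(2); workload() is a stand-in for the code
 * being measured.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *
 *	struct perf_event_attr attr;
 *	uint64_t count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 1;
 *	attr.exclude_kernel = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 *
 * pid == 0, cpu == -1 selects "this task, on any CPU"; group_fd == -1
 * makes the new event a group leader of its own.
 */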

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback to trigger when we hit the event
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
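/*
 * Illustrative sketch, not part of the original source: an in-kernel
 * user of the export above, in the style of the hard-lockup watchdog.
 * The overflow callback is invoked from PMU interrupt (possibly NMI)
 * context and must be written accordingly; my_overflow_handler and the
 * period below are made-up illustration values.
 *
 *	static void my_overflow_handler(struct perf_event *event, int nmi,
 *					struct perf_sample_data *data,
 *					struct pt_regs *regs)
 *	{
 *		-- react to the counter crossing its sample period
 *	}
 *
 *	static struct perf_event_attr wd_attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.pinned		= 1,
 *		.disabled	= 1,
 *		.sample_period	= 10000000,
 *	};
 *
 *	event = perf_event_create_kernel_counter(&wd_attr, cpu, NULL,
 *						 my_overflow_handler);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	perf_event_enable(event);
 */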

static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	struct perf_event *parent_event;

	perf_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped - but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either the child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
	task_ctx_sched_out(child_ctx, EVENT_ALL);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner; this closes a race against perf_release(),
		 * where we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock keeps it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);