/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>

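/*
 * Scheduling classes for event groups: pinned groups must always be on
 * the PMU (and are put in error state if they cannot be scheduled),
 * while flexible groups may be rotated when the PMU is over-committed.
 * EVENT_ALL selects both classes.
 */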
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

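/*
 * The list of registered PMUs: pmus_lock serializes writers, while
 * readers traverse the list under pmus_srcu.
 */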
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

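/*
 * perf_pmu_disable() and perf_pmu_enable() nest by way of a per-cpu
 * counter: only the outermost disable/enable pair actually invokes the
 * pmu callbacks, so callers can freely bracket critical sections:
 *
 *	perf_pmu_disable(pmu);
 *	... reprogram events without the PMU observing ...
 *	perf_pmu_enable(pmu);
 */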
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

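/*
 * A context's lifetime is refcounted and the final put frees it via
 * RCU: lockless readers that found the context through
 * rcu_dereference(task->perf_event_ctxp[]) can therefore keep looking
 * at it safely until they drop rcu_read_lock().
 */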
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

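/*
 * Note: a non-zero pin_count also makes context_equiv() fail, so a
 * pinned context cannot be swapped with its parent clone on context
 * switch.
 */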
/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = perf_event_time(event);
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

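/*
 * Worked example: an event with tstamp_enabled = 100 that was scheduled
 * in at 120 and out at 150 reads, at ctx->time 160,
 * total_time_enabled = 60 and total_time_running = 30; the
 * tstamp_running adjustments in event_sched_in()/event_sched_out()
 * keep the (run_end - tstamp_running) delta equal to the accumulated
 * running time.
 */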
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand-alone event or group leader, we go to the context
	 * list; group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

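/*
 * Example: a group leader with two siblings and read_format =
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID yields nr = 3, entry = 16
 * (value + id), plus one u64 for the group's nr field, so
 * read_size = 8 + 3 * 16 = 56 bytes.
 */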
static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

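/*
 * A cpu-bound event only counts while running on its designated CPU;
 * event->cpu == -1 means the event follows its task on any CPU.
 */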
static inline int
event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	raw_spin_unlock(&ctx->lock);
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the event safely if the call above did not succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	event->shadow_ctx_time = tstamp - ctx->timestamp;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

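/*
 * Scheduling a group is all-or-nothing: the pmu transaction interface
 * (->start_txn(), ->add() per event, ->commit_txn()) lets the hardware
 * driver validate the whole group in one go; on failure we
 * ->cancel_txn() and unwind any members that did go on.
 */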
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	add_event_to_ctx(event, ctx);

	if (!event_filter_match(event))
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the event safely if the call above did not succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
				      struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (!event_filter_match(event))
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	raw_spin_unlock_irq(&ctx->lock);
}

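/*
 * perf_event_refresh() re-arms a sampling event for 'refresh' more
 * overflows: event_limit counts down on each overflow and the overflow
 * handler disables the event when it reaches zero.
 */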
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	if (!ctx->nr_active)
		goto out;

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
out:
	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

void perf_event_context_sched_out(struct task_struct *task, int ctxn,
				  struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);
}

static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

1445static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001446ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001447 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001448{
1449 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001450
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001451 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1452 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001453 continue;
Stephane Eranian5632ab12011-01-03 18:20:01 +02001454 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001455 continue;
1456
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001457 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01001458 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001459
1460 /*
1461 * If this pinned group hasn't been scheduled,
1462 * put it in error state.
1463 */
1464 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1465 update_group_times(event);
1466 event->state = PERF_EVENT_STATE_ERROR;
1467 }
1468 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001469}
1470
1471static void
1472ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001473 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001474{
1475 struct perf_event *event;
1476 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001477
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001478 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1479 /* Ignore events in OFF or ERROR state */
1480 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001481 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001482 /*
1483 * Listen to the 'cpu' scheduling filter constraint
1484 * of events:
1485 */
Stephane Eranian5632ab12011-01-03 18:20:01 +02001486 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001487 continue;
1488
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001489 if (group_can_go_on(event, cpuctx, can_add_hw)) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001490 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001491 can_add_hw = 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001492 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001493 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001494}
1495
1496static void
1497ctx_sched_in(struct perf_event_context *ctx,
1498 struct perf_cpu_context *cpuctx,
1499 enum event_type_t event_type)
1500{
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001501 raw_spin_lock(&ctx->lock);
1502 ctx->is_active = 1;
1503 if (likely(!ctx->nr_events))
1504 goto out;
1505
1506 ctx->timestamp = perf_clock();
1507
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001508 /*
1509 * First go through the list and put on any pinned groups
1510 * in order to give them the best chance of going on.
1511 */
1512 if (event_type & EVENT_PINNED)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001513 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001514
1515 /* Then walk through the lower prio flexible groups */
1516 if (event_type & EVENT_FLEXIBLE)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001517 ctx_flexible_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001518
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001519out:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001520 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001521}
1522
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001523static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1524 enum event_type_t event_type)
1525{
1526 struct perf_event_context *ctx = &cpuctx->ctx;
1527
1528 ctx_sched_in(ctx, cpuctx, event_type);
1529}
1530
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001531static void task_ctx_sched_in(struct perf_event_context *ctx,
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001532 enum event_type_t event_type)
1533{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001534 struct perf_cpu_context *cpuctx;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001535
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001536 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001537 if (cpuctx->task_ctx == ctx)
1538 return;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001539
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001540 ctx_sched_in(ctx, cpuctx, event_type);
1541 cpuctx->task_ctx = ctx;
1542}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001543
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001544void perf_event_context_sched_in(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001545{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001546 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001547
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001548 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001549 if (cpuctx->task_ctx == ctx)
1550 return;
1551
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001552 perf_pmu_disable(ctx->pmu);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001553 /*
1554 * We want to keep the following priority order:
 1555	 * cpu pinned (these don't need to move), task pinned,
1556 * cpu flexible, task flexible.
1557 */
1558 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1559
1560 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1561 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1562 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1563
1564 cpuctx->task_ctx = ctx;
eranian@google.com9b33fa62010-03-10 22:26:05 -08001565
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001566 /*
1567 * Since these rotations are per-cpu, we need to ensure the
1568 * cpu-context we got scheduled on is actually rotating.
1569 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001570 perf_pmu_rotate_start(ctx->pmu);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001571 perf_pmu_enable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001572}
1573
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001574/*
1575 * Called from scheduler to add the events of the current task
1576 * with interrupts disabled.
1577 *
1578 * We restore the event value and then enable it.
1579 *
1580 * This does not protect us against NMI, but enable()
1581 * sets the enabled bit in the control field of event _before_
 1582	 * accessing the event control register. If an NMI hits, then it will
1583 * keep the event running.
1584 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02001585void __perf_event_task_sched_in(struct task_struct *task)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001586{
1587 struct perf_event_context *ctx;
1588 int ctxn;
1589
1590 for_each_task_context_nr(ctxn) {
1591 ctx = task->perf_event_ctxp[ctxn];
1592 if (likely(!ctx))
1593 continue;
1594
1595 perf_event_context_sched_in(ctx);
1596 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597}
1598
1599#define MAX_INTERRUPTS (~0ULL)
1600
1601static void perf_log_throttle(struct perf_event *event, int enable);
1602
Peter Zijlstraabd50712010-01-26 18:50:16 +01001603static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1604{
1605 u64 frequency = event->attr.sample_freq;
1606 u64 sec = NSEC_PER_SEC;
1607 u64 divisor, dividend;
1608
1609 int count_fls, nsec_fls, frequency_fls, sec_fls;
1610
1611 count_fls = fls64(count);
1612 nsec_fls = fls64(nsec);
1613 frequency_fls = fls64(frequency);
1614 sec_fls = 30;
1615
1616 /*
 1617	 * We got @count in @nsec, with a target of sample_freq HZ;
 1618	 * the target period becomes:
1619 *
1620 * @count * 10^9
1621 * period = -------------------
1622 * @nsec * sample_freq
1623 *
1624 */
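	/*
	 * Worked example with illustrative numbers: @count = 1,000,000
	 * events over @nsec = 10,000,000 ns (10 ms) at sample_freq = 1000
	 * gives period = 1e6 * 1e9 / (1e7 * 1e3) = 100,000 events/sample.
	 */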
1625
1626 /*
1627 * Reduce accuracy by one bit such that @a and @b converge
1628 * to a similar magnitude.
1629 */
1630#define REDUCE_FLS(a, b) \
1631do { \
1632 if (a##_fls > b##_fls) { \
1633 a >>= 1; \
1634 a##_fls--; \
1635 } else { \
1636 b >>= 1; \
1637 b##_fls--; \
1638 } \
1639} while (0)
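/*
 * Example: with nsec_fls == 40 and frequency_fls == 30, one
 * REDUCE_FLS(nsec, frequency) step shifts nsec right by one bit
 * (nsec_fls becomes 39); repeating until the sums below fit in 64 bits
 * costs only a few low-order bits of precision.
 */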
1640
1641 /*
1642 * Reduce accuracy until either term fits in a u64, then proceed with
1643 * the other, so that finally we can do a u64/u64 division.
1644 */
1645 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1646 REDUCE_FLS(nsec, frequency);
1647 REDUCE_FLS(sec, count);
1648 }
1649
1650 if (count_fls + sec_fls > 64) {
1651 divisor = nsec * frequency;
1652
1653 while (count_fls + sec_fls > 64) {
1654 REDUCE_FLS(count, sec);
1655 divisor >>= 1;
1656 }
1657
1658 dividend = count * sec;
1659 } else {
1660 dividend = count * sec;
1661
1662 while (nsec_fls + frequency_fls > 64) {
1663 REDUCE_FLS(nsec, frequency);
1664 dividend >>= 1;
1665 }
1666
1667 divisor = nsec * frequency;
1668 }
1669
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02001670 if (!divisor)
1671 return dividend;
1672
Peter Zijlstraabd50712010-01-26 18:50:16 +01001673 return div64_u64(dividend, divisor);
1674}
1675
1676static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001677{
1678 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02001679 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001680 s64 delta;
1681
Peter Zijlstraabd50712010-01-26 18:50:16 +01001682 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001683
1684 delta = (s64)(period - hwc->sample_period);
1685 delta = (delta + 7) / 8; /* low pass filter */
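	/*
	 * Illustrative numbers: with hwc->sample_period == 100000 and a
	 * computed target of 108000, delta becomes 8000 / 8 = 1000, so the
	 * period moves to 101000 this tick and converges over a few ticks
	 * rather than jumping straight to the target.
	 */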
1686
1687 sample_period = hwc->sample_period + delta;
1688
1689 if (!sample_period)
1690 sample_period = 1;
1691
1692 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001693
Peter Zijlstrae7850592010-05-21 14:43:08 +02001694 if (local64_read(&hwc->period_left) > 8*sample_period) {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001695 event->pmu->stop(event, PERF_EF_UPDATE);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001696 local64_set(&hwc->period_left, 0);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001697 event->pmu->start(event, PERF_EF_RELOAD);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001698 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001699}
1700
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001701static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001702{
1703 struct perf_event *event;
1704 struct hw_perf_event *hwc;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001705 u64 interrupts, now;
1706 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001707
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001708 raw_spin_lock(&ctx->lock);
Paul Mackerras03541f82009-10-14 16:58:03 +11001709 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001710 if (event->state != PERF_EVENT_STATE_ACTIVE)
1711 continue;
1712
Stephane Eranian5632ab12011-01-03 18:20:01 +02001713 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01001714 continue;
1715
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001716 hwc = &event->hw;
1717
1718 interrupts = hwc->interrupts;
1719 hwc->interrupts = 0;
1720
1721 /*
1722 * unthrottle events on the tick
1723 */
1724 if (interrupts == MAX_INTERRUPTS) {
1725 perf_log_throttle(event, 1);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001726 event->pmu->start(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001727 }
1728
1729 if (!event->attr.freq || !event->attr.sample_freq)
1730 continue;
1731
Peter Zijlstraabd50712010-01-26 18:50:16 +01001732 event->pmu->read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001733 now = local64_read(&event->count);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001734 delta = now - hwc->freq_count_stamp;
1735 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001736
Peter Zijlstraabd50712010-01-26 18:50:16 +01001737 if (delta > 0)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001738 perf_adjust_period(event, period, delta);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001739 }
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001740 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001741}
1742
1743/*
1744 * Round-robin a context's events:
1745 */
1746static void rotate_ctx(struct perf_event_context *ctx)
1747{
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001748 raw_spin_lock(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001749
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01001750 /*
 1751	 * Rotate the non-pinned groups, moving the first entry to the end.
 1752	 * Rotation might be disabled by the inheritance code.
1753 */
1754 if (!ctx->rotate_disable)
1755 list_rotate_left(&ctx->flexible_groups);
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001756
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001757 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001758}
1759
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001760/*
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001761 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
1762 * because they're strictly cpu affine and rotate_start is called with IRQs
1763 * disabled, while rotate_context is called from IRQ context.
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001764 */
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001765static void perf_rotate_context(struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001766{
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001767 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001768 struct perf_event_context *ctx = NULL;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001769 int rotate = 0, remove = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001770
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001771 if (cpuctx->ctx.nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001772 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001773 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1774 rotate = 1;
1775 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001776
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001777 ctx = cpuctx->task_ctx;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001778 if (ctx && ctx->nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001779 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001780 if (ctx->nr_events != ctx->nr_active)
1781 rotate = 1;
1782 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001783
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001784 perf_pmu_disable(cpuctx->ctx.pmu);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001785 perf_ctx_adjust_freq(&cpuctx->ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001786 if (ctx)
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001787 perf_ctx_adjust_freq(ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001788
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001789 if (!rotate)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001790 goto done;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001791
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001792 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001793 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001794 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001795
1796 rotate_ctx(&cpuctx->ctx);
1797 if (ctx)
1798 rotate_ctx(ctx);
1799
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001800 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001801 if (ctx)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001802 task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001803
1804done:
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001805 if (remove)
1806 list_del_init(&cpuctx->rotation_list);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02001807
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02001808 perf_pmu_enable(cpuctx->ctx.pmu);
1809}
1810
1811void perf_event_task_tick(void)
1812{
1813 struct list_head *head = &__get_cpu_var(rotation_list);
1814 struct perf_cpu_context *cpuctx, *tmp;
1815
1816 WARN_ON(!irqs_disabled());
1817
1818 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
1819 if (cpuctx->jiffies_interval == 1 ||
1820 !(jiffies % cpuctx->jiffies_interval))
1821 perf_rotate_context(cpuctx);
1822 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001823}
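/*
 * Example: a PMU whose cpuctx->jiffies_interval == 4 has its contexts
 * rotated on every 4th tick (whenever jiffies % 4 == 0); the common
 * interval of 1 rotates on every tick.
 */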
1824
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001825static int event_enable_on_exec(struct perf_event *event,
1826 struct perf_event_context *ctx)
1827{
1828 if (!event->attr.enable_on_exec)
1829 return 0;
1830
1831 event->attr.enable_on_exec = 0;
1832 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1833 return 0;
1834
1835 __perf_event_mark_enabled(event, ctx);
1836
1837 return 1;
1838}
1839
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001840/*
1841 * Enable all of a task's events that have been marked enable-on-exec.
1842 * This expects task == current.
1843 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001844static void perf_event_enable_on_exec(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001845{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001846 struct perf_event *event;
1847 unsigned long flags;
1848 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001849 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001850
1851 local_irq_save(flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001852 if (!ctx || !ctx->nr_events)
1853 goto out;
1854
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001855 task_ctx_sched_out(ctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001856
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001857 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001858
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001859 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1860 ret = event_enable_on_exec(event, ctx);
1861 if (ret)
1862 enabled = 1;
1863 }
1864
1865 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1866 ret = event_enable_on_exec(event, ctx);
1867 if (ret)
1868 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001869 }
1870
1871 /*
1872 * Unclone this context if we enabled any event.
1873 */
1874 if (enabled)
1875 unclone_ctx(ctx);
1876
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001877 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001878
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001879 perf_event_context_sched_in(ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001880out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001881 local_irq_restore(flags);
1882}
1883
1884/*
1885 * Cross CPU call to read the hardware event
1886 */
1887static void __perf_event_read(void *info)
1888{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001889 struct perf_event *event = info;
1890 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001891 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001892
1893 /*
1894 * If this is a task context, we need to check whether it is
 1895	 * the current task context of this cpu. If not, it has been
1896 * scheduled out before the smp call arrived. In that case
1897 * event->count would have been updated to a recent sample
1898 * when the event was scheduled out.
1899 */
1900 if (ctx->task && cpuctx->task_ctx != ctx)
1901 return;
1902
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001903 raw_spin_lock(&ctx->lock);
Peter Zijlstra542e72f2011-01-26 15:38:35 +01001904 if (ctx->is_active)
1905 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001906 update_event_times(event);
Peter Zijlstra542e72f2011-01-26 15:38:35 +01001907 if (event->state == PERF_EVENT_STATE_ACTIVE)
1908 event->pmu->read(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001909 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910}
1911
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001912static inline u64 perf_event_count(struct perf_event *event)
1913{
Peter Zijlstrae7850592010-05-21 14:43:08 +02001914 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001915}
1916
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001917static u64 perf_event_read(struct perf_event *event)
1918{
1919 /*
1920 * If event is enabled and currently active on a CPU, update the
1921 * value in the event structure:
1922 */
1923 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1924 smp_call_function_single(event->oncpu,
1925 __perf_event_read, event, 1);
1926 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001927 struct perf_event_context *ctx = event->ctx;
1928 unsigned long flags;
1929
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001930 raw_spin_lock_irqsave(&ctx->lock, flags);
Stephane Eranianc530ccd2010-10-15 15:26:01 +02001931 /*
 1932	 * We may read an event while its context is not
 1933	 * active (e.g., the thread is blocked); in that
 1934	 * case we cannot update the context time.
1935 */
1936 if (ctx->is_active)
1937 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001938 update_event_times(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001939 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001940 }
1941
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001942 return perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001943}
1944
1945/*
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001946 * Callchain support
1947 */
1948
1949struct callchain_cpus_entries {
1950 struct rcu_head rcu_head;
1951 struct perf_callchain_entry *cpu_entries[0];
1952};
1953
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02001954static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001955static atomic_t nr_callchain_events;
1956static DEFINE_MUTEX(callchain_mutex);
1957struct callchain_cpus_entries *callchain_cpus_entries;
1958
1959
1960__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
1961 struct pt_regs *regs)
1962{
1963}
1964
1965__weak void perf_callchain_user(struct perf_callchain_entry *entry,
1966 struct pt_regs *regs)
1967{
1968}
1969
1970static void release_callchain_buffers_rcu(struct rcu_head *head)
1971{
1972 struct callchain_cpus_entries *entries;
1973 int cpu;
1974
1975 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
1976
1977 for_each_possible_cpu(cpu)
1978 kfree(entries->cpu_entries[cpu]);
1979
1980 kfree(entries);
1981}
1982
1983static void release_callchain_buffers(void)
1984{
1985 struct callchain_cpus_entries *entries;
1986
1987 entries = callchain_cpus_entries;
1988 rcu_assign_pointer(callchain_cpus_entries, NULL);
1989 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
1990}
1991
1992static int alloc_callchain_buffers(void)
1993{
1994 int cpu;
1995 int size;
1996 struct callchain_cpus_entries *entries;
1997
1998 /*
1999 * We can't use the percpu allocation API for data that can be
2000 * accessed from NMI. Use a temporary manual per cpu allocation
2001 * until that gets sorted out.
2002 */
Eric Dumazet88d4f0d2011-01-25 19:40:51 +01002003 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002004
2005 entries = kzalloc(size, GFP_KERNEL);
2006 if (!entries)
2007 return -ENOMEM;
2008
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02002009 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002010
2011 for_each_possible_cpu(cpu) {
2012 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
2013 cpu_to_node(cpu));
2014 if (!entries->cpu_entries[cpu])
2015 goto fail;
2016 }
2017
2018 rcu_assign_pointer(callchain_cpus_entries, entries);
2019
2020 return 0;
2021
2022fail:
2023 for_each_possible_cpu(cpu)
2024 kfree(entries->cpu_entries[cpu]);
2025 kfree(entries);
2026
2027 return -ENOMEM;
2028}
2029
2030static int get_callchain_buffers(void)
2031{
2032 int err = 0;
2033 int count;
2034
2035 mutex_lock(&callchain_mutex);
2036
2037 count = atomic_inc_return(&nr_callchain_events);
2038 if (WARN_ON_ONCE(count < 1)) {
2039 err = -EINVAL;
2040 goto exit;
2041 }
2042
2043 if (count > 1) {
2044 /* If the allocation failed, give up */
2045 if (!callchain_cpus_entries)
2046 err = -ENOMEM;
2047 goto exit;
2048 }
2049
2050 err = alloc_callchain_buffers();
2051 if (err)
2052 release_callchain_buffers();
2053exit:
2054 mutex_unlock(&callchain_mutex);
2055
2056 return err;
2057}
2058
2059static void put_callchain_buffers(void)
2060{
2061 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
2062 release_callchain_buffers();
2063 mutex_unlock(&callchain_mutex);
2064 }
2065}
2066
2067static int get_recursion_context(int *recursion)
2068{
2069 int rctx;
2070
2071 if (in_nmi())
2072 rctx = 3;
2073 else if (in_irq())
2074 rctx = 2;
2075 else if (in_softirq())
2076 rctx = 1;
2077 else
2078 rctx = 0;
2079
2080 if (recursion[rctx])
2081 return -1;
2082
2083 recursion[rctx]++;
2084 barrier();
2085
2086 return rctx;
2087}
2088
2089static inline void put_recursion_context(int *recursion, int rctx)
2090{
2091 barrier();
2092 recursion[rctx]--;
2093}
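/*
 * Example: a callchain captured from softirq context occupies slot 1;
 * an NMI arriving meanwhile takes slot 3, so the two never share a
 * per-cpu entry. A nested attempt in the *same* context finds
 * recursion[rctx] already set and gets -1 back.
 */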
2094
2095static struct perf_callchain_entry *get_callchain_entry(int *rctx)
2096{
2097 int cpu;
2098 struct callchain_cpus_entries *entries;
2099
2100 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
2101 if (*rctx == -1)
2102 return NULL;
2103
2104 entries = rcu_dereference(callchain_cpus_entries);
2105 if (!entries)
2106 return NULL;
2107
2108 cpu = smp_processor_id();
2109
2110 return &entries->cpu_entries[cpu][*rctx];
2111}
2112
2113static void
2114put_callchain_entry(int rctx)
2115{
2116 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
2117}
2118
2119static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2120{
2121 int rctx;
2122 struct perf_callchain_entry *entry;
2123
2124
2125 entry = get_callchain_entry(&rctx);
2126 if (rctx == -1)
2127 return NULL;
2128
2129 if (!entry)
2130 goto exit_put;
2131
2132 entry->nr = 0;
2133
2134 if (!user_mode(regs)) {
2135 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
2136 perf_callchain_kernel(entry, regs);
2137 if (current->mm)
2138 regs = task_pt_regs(current);
2139 else
2140 regs = NULL;
2141 }
2142
2143 if (regs) {
2144 perf_callchain_store(entry, PERF_CONTEXT_USER);
2145 perf_callchain_user(entry, regs);
2146 }
2147
2148exit_put:
2149 put_callchain_entry(rctx);
2150
2151 return entry;
2152}
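/*
 * The resulting entry is a flat array of instruction pointers with
 * context markers, roughly:
 *	PERF_CONTEXT_KERNEL, k_ip0, k_ip1, ..., PERF_CONTEXT_USER, u_ip0, ...
 * A sample taken while already in user mode simply lacks the kernel prefix.
 */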
2153
2154/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002155 * Initialize the perf_event context in a task_struct:
2156 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02002157static void __perf_event_init_context(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002158{
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002159 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002160 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002161 INIT_LIST_HEAD(&ctx->pinned_groups);
2162 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002163 INIT_LIST_HEAD(&ctx->event_list);
2164 atomic_set(&ctx->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002165}
2166
Peter Zijlstraeb184472010-09-07 15:55:13 +02002167static struct perf_event_context *
2168alloc_perf_context(struct pmu *pmu, struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002169{
2170 struct perf_event_context *ctx;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002171
2172 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2173 if (!ctx)
2174 return NULL;
2175
2176 __perf_event_init_context(ctx);
2177 if (task) {
2178 ctx->task = task;
2179 get_task_struct(task);
2180 }
2181 ctx->pmu = pmu;
2182
2183 return ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002184}
2185
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002186static struct task_struct *
2187find_lively_task_by_vpid(pid_t vpid)
2188{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002189 struct task_struct *task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002190 int err;
2191
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002192 rcu_read_lock();
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002193 if (!vpid)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002194 task = current;
2195 else
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002196 task = find_task_by_vpid(vpid);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002197 if (task)
2198 get_task_struct(task);
2199 rcu_read_unlock();
2200
2201 if (!task)
2202 return ERR_PTR(-ESRCH);
2203
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002204 /* Reuse ptrace permission checks for now. */
2205 err = -EACCES;
2206 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2207 goto errout;
2208
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002209 return task;
2210errout:
2211 put_task_struct(task);
2212 return ERR_PTR(err);
2213
2214}
2215
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002216static struct perf_event_context *
Matt Helsley38a81da2010-09-13 13:01:20 -07002217find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002218{
2219 struct perf_event_context *ctx;
2220 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002221 unsigned long flags;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002222 int ctxn, err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002223
Oleg Nesterov22a4ec72011-01-18 17:10:08 +01002224 if (!task) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002225 /* Must be root to operate on a CPU event: */
2226 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2227 return ERR_PTR(-EACCES);
2228
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002229 /*
 2230	 * We could be clever and allow attaching an event to an
2231 * offline CPU and activate it when the CPU comes up, but
2232 * that's for later.
2233 */
2234 if (!cpu_online(cpu))
2235 return ERR_PTR(-ENODEV);
2236
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002237 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002238 ctx = &cpuctx->ctx;
2239 get_ctx(ctx);
2240
2241 return ctx;
2242 }
2243
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002244 err = -EINVAL;
2245 ctxn = pmu->task_ctx_nr;
2246 if (ctxn < 0)
2247 goto errout;
2248
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002249retry:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002250 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002251 if (ctx) {
2252 unclone_ctx(ctx);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002253 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002254 }
2255
2256 if (!ctx) {
Peter Zijlstraeb184472010-09-07 15:55:13 +02002257 ctx = alloc_perf_context(pmu, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002258 err = -ENOMEM;
2259 if (!ctx)
2260 goto errout;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002261
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002262 get_ctx(ctx);
Peter Zijlstraeb184472010-09-07 15:55:13 +02002263
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002264 err = 0;
2265 mutex_lock(&task->perf_event_mutex);
2266 /*
 2267	 * If it has already passed perf_event_exit_task(),
 2268	 * we must see PF_EXITING; it takes this mutex too.
2269 */
2270 if (task->flags & PF_EXITING)
2271 err = -ESRCH;
2272 else if (task->perf_event_ctxp[ctxn])
2273 err = -EAGAIN;
2274 else
2275 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2276 mutex_unlock(&task->perf_event_mutex);
2277
2278 if (unlikely(err)) {
Peter Zijlstraeb184472010-09-07 15:55:13 +02002279 put_task_struct(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002280 kfree(ctx);
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002281
2282 if (err == -EAGAIN)
2283 goto retry;
2284 goto errout;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002285 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002286 }
2287
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002288 return ctx;
2289
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002290errout:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002291 return ERR_PTR(err);
2292}
2293
Li Zefan6fb29152009-10-15 11:21:42 +08002294static void perf_event_free_filter(struct perf_event *event);
2295
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002296static void free_event_rcu(struct rcu_head *head)
2297{
2298 struct perf_event *event;
2299
2300 event = container_of(head, struct perf_event, rcu_head);
2301 if (event->ns)
2302 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08002303 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002304 kfree(event);
2305}
2306
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002307static void perf_buffer_put(struct perf_buffer *buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002308
2309static void free_event(struct perf_event *event)
2310{
Peter Zijlstrae360adb2010-10-14 14:01:34 +08002311 irq_work_sync(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002312
2313 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02002314 if (event->attach_state & PERF_ATTACH_TASK)
2315 jump_label_dec(&perf_task_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01002316 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002317 atomic_dec(&nr_mmap_events);
2318 if (event->attr.comm)
2319 atomic_dec(&nr_comm_events);
2320 if (event->attr.task)
2321 atomic_dec(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002322 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2323 put_callchain_buffers();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002324 }
2325
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002326 if (event->buffer) {
2327 perf_buffer_put(event->buffer);
2328 event->buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002329 }
2330
2331 if (event->destroy)
2332 event->destroy(event);
2333
Peter Zijlstra0c67b402010-09-13 11:15:58 +02002334 if (event->ctx)
2335 put_ctx(event->ctx);
2336
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002337 call_rcu(&event->rcu_head, free_event_rcu);
2338}
2339
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002340int perf_event_release_kernel(struct perf_event *event)
2341{
2342 struct perf_event_context *ctx = event->ctx;
2343
Peter Zijlstra050735b2010-05-11 11:51:53 +02002344 /*
2345 * Remove from the PMU, can't get re-enabled since we got
2346 * here because the last ref went.
2347 */
2348 perf_event_disable(event);
2349
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002350 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa0507c82010-05-06 15:42:53 +02002351 /*
2352 * There are two ways this annotation is useful:
2353 *
 2354	 * 1) there is a lock recursion from perf_event_exit_task;
2355 * see the comment there.
2356 *
2357 * 2) there is a lock-inversion with mmap_sem through
2358 * perf_event_read_group(), which takes faults while
 2359	 *    holding ctx->mutex; however, this is called after
2360 * the last filedesc died, so there is no possibility
2361 * to trigger the AB-BA case.
2362 */
2363 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002364 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002365 perf_group_detach(event);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002366 list_del_event(event, ctx);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002367 raw_spin_unlock_irq(&ctx->lock);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002368 mutex_unlock(&ctx->mutex);
2369
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002370 free_event(event);
2371
2372 return 0;
2373}
2374EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2375
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002376/*
2377 * Called when the last reference to the file is gone.
2378 */
2379static int perf_release(struct inode *inode, struct file *file)
2380{
2381 struct perf_event *event = file->private_data;
Peter Zijlstra88821352010-11-09 19:01:43 +01002382 struct task_struct *owner;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002383
2384 file->private_data = NULL;
2385
Peter Zijlstra88821352010-11-09 19:01:43 +01002386 rcu_read_lock();
2387 owner = ACCESS_ONCE(event->owner);
2388 /*
2389 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
 2390	 * !owner, it means the list deletion is complete and we can indeed
2391 * free this event, otherwise we need to serialize on
2392 * owner->perf_event_mutex.
2393 */
2394 smp_read_barrier_depends();
2395 if (owner) {
2396 /*
2397 * Since delayed_put_task_struct() also drops the last
2398 * task reference we can safely take a new reference
2399 * while holding the rcu_read_lock().
2400 */
2401 get_task_struct(owner);
2402 }
2403 rcu_read_unlock();
2404
2405 if (owner) {
2406 mutex_lock(&owner->perf_event_mutex);
2407 /*
 2408		 * We have to re-check the event->owner field: if it is cleared,
 2409		 * we raced with perf_event_exit_task(); acquiring the mutex
2410 * ensured they're done, and we can proceed with freeing the
2411 * event.
2412 */
2413 if (event->owner)
2414 list_del_init(&event->owner_entry);
2415 mutex_unlock(&owner->perf_event_mutex);
2416 put_task_struct(owner);
2417 }
2418
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002419 return perf_event_release_kernel(event);
2420}
2421
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002422u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002423{
2424 struct perf_event *child;
2425 u64 total = 0;
2426
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002427 *enabled = 0;
2428 *running = 0;
2429
Peter Zijlstra6f105812009-11-20 22:19:56 +01002430 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002431 total += perf_event_read(event);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002432 *enabled += event->total_time_enabled +
2433 atomic64_read(&event->child_total_time_enabled);
2434 *running += event->total_time_running +
2435 atomic64_read(&event->child_total_time_running);
2436
2437 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002438 total += perf_event_read(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002439 *enabled += child->total_time_enabled;
2440 *running += child->total_time_running;
2441 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002442 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002443
2444 return total;
2445}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002446EXPORT_SYMBOL_GPL(perf_event_read_value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002447
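/*
 * The function below emits, roughly, the documented perf_event_open(2)
 * group-read layout (optional words depend on the read_format bits):
 *
 *	u64 nr;			   1 + leader->nr_siblings
 *	u64 time_enabled;	   if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	   if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	u64 value; [u64 id;]	   leader (id if PERF_FORMAT_ID)
 *	u64 value; [u64 id;]	   ... one pair per sibling
 */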
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002448static int perf_event_read_group(struct perf_event *event,
2449 u64 read_format, char __user *buf)
2450{
2451 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01002452 int n = 0, size = 0, ret = -EFAULT;
2453 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002454 u64 values[5];
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002455 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002456
Peter Zijlstra6f105812009-11-20 22:19:56 +01002457 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002458 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002459
2460 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002461 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2462 values[n++] = enabled;
2463 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2464 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002465 values[n++] = count;
2466 if (read_format & PERF_FORMAT_ID)
2467 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002468
2469 size = n * sizeof(u64);
2470
2471 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01002472 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002473
Peter Zijlstra6f105812009-11-20 22:19:56 +01002474 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002475
2476 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01002477 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002478
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002479 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01002480 if (read_format & PERF_FORMAT_ID)
2481 values[n++] = primary_event_id(sub);
2482
2483 size = n * sizeof(u64);
2484
Stephane Eranian184d3da2009-11-23 21:40:49 -08002485 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01002486 ret = -EFAULT;
2487 goto unlock;
2488 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01002489
2490 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002491 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002492unlock:
2493 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002494
Peter Zijlstraabf48682009-11-20 22:19:49 +01002495 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002496}
2497
2498static int perf_event_read_one(struct perf_event *event,
2499 u64 read_format, char __user *buf)
2500{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002501 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002502 u64 values[4];
2503 int n = 0;
2504
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002505 values[n++] = perf_event_read_value(event, &enabled, &running);
2506 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2507 values[n++] = enabled;
2508 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2509 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002510 if (read_format & PERF_FORMAT_ID)
2511 values[n++] = primary_event_id(event);
2512
2513 if (copy_to_user(buf, values, n * sizeof(u64)))
2514 return -EFAULT;
2515
2516 return n * sizeof(u64);
2517}
2518
2519/*
2520 * Read the performance event - simple non blocking version for now
2521 */
2522static ssize_t
2523perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2524{
2525 u64 read_format = event->attr.read_format;
2526 int ret;
2527
2528 /*
 2529	 * Return end-of-file for a read on an event that is in
 2530	 * error state (i.e. because it was pinned but it couldn't be
 2531	 * scheduled onto the CPU at some point).
2532 */
2533 if (event->state == PERF_EVENT_STATE_ERROR)
2534 return 0;
2535
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02002536 if (count < event->read_size)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002537 return -ENOSPC;
2538
2539 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002540 if (read_format & PERF_FORMAT_GROUP)
2541 ret = perf_event_read_group(event, read_format, buf);
2542 else
2543 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002544
2545 return ret;
2546}
2547
2548static ssize_t
2549perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2550{
2551 struct perf_event *event = file->private_data;
2552
2553 return perf_read_hw(event, buf, count);
2554}
2555
2556static unsigned int perf_poll(struct file *file, poll_table *wait)
2557{
2558 struct perf_event *event = file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002559 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002560 unsigned int events = POLL_HUP;
2561
2562 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002563 buffer = rcu_dereference(event->buffer);
2564 if (buffer)
2565 events = atomic_xchg(&buffer->poll, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002566 rcu_read_unlock();
2567
2568 poll_wait(file, &event->waitq, wait);
2569
2570 return events;
2571}
2572
2573static void perf_event_reset(struct perf_event *event)
2574{
2575 (void)perf_event_read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02002576 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002577 perf_event_update_userpage(event);
2578}
2579
2580/*
2581 * Holding the top-level event's child_mutex means that any
2582 * descendant process that has inherited this event will block
2583 * in sync_child_event if it goes to exit, thus satisfying the
2584 * task existence requirements of perf_event_enable/disable.
2585 */
2586static void perf_event_for_each_child(struct perf_event *event,
2587 void (*func)(struct perf_event *))
2588{
2589 struct perf_event *child;
2590
2591 WARN_ON_ONCE(event->ctx->parent_ctx);
2592 mutex_lock(&event->child_mutex);
2593 func(event);
2594 list_for_each_entry(child, &event->child_list, child_list)
2595 func(child);
2596 mutex_unlock(&event->child_mutex);
2597}
2598
2599static void perf_event_for_each(struct perf_event *event,
2600 void (*func)(struct perf_event *))
2601{
2602 struct perf_event_context *ctx = event->ctx;
2603 struct perf_event *sibling;
2604
2605 WARN_ON_ONCE(ctx->parent_ctx);
2606 mutex_lock(&ctx->mutex);
2607 event = event->group_leader;
2608
2609 perf_event_for_each_child(event, func);
2610 func(event);
2611 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2612 perf_event_for_each_child(event, func);
2613 mutex_unlock(&ctx->mutex);
2614}
2615
2616static int perf_event_period(struct perf_event *event, u64 __user *arg)
2617{
2618 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002619 int ret = 0;
2620 u64 value;
2621
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01002622 if (!is_sampling_event(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002623 return -EINVAL;
2624
John Blackwoodad0cf342010-09-28 18:03:11 -04002625 if (copy_from_user(&value, arg, sizeof(value)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002626 return -EFAULT;
2627
2628 if (!value)
2629 return -EINVAL;
2630
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002631 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002632 if (event->attr.freq) {
2633 if (value > sysctl_perf_event_sample_rate) {
2634 ret = -EINVAL;
2635 goto unlock;
2636 }
2637
2638 event->attr.sample_freq = value;
2639 } else {
2640 event->attr.sample_period = value;
2641 event->hw.sample_period = value;
2642 }
2643unlock:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002644 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002645
2646 return ret;
2647}
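/*
 * Hedged userspace usage sketch (assumes the standard perf_event_open(2)
 * ABI; 'fd' stands for an illustrative perf event file descriptor):
 *
 *	u64 new_period = 4096;
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period) < 0)
 *		perror("PERF_EVENT_IOC_PERIOD");
 *
 * For a freq-based event the value is interpreted as sample_freq and
 * checked against sysctl_perf_event_sample_rate, as above.
 */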
2648
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002649static const struct file_operations perf_fops;
2650
2651static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2652{
2653 struct file *file;
2654
2655 file = fget_light(fd, fput_needed);
2656 if (!file)
2657 return ERR_PTR(-EBADF);
2658
2659 if (file->f_op != &perf_fops) {
2660 fput_light(file, *fput_needed);
2661 *fput_needed = 0;
2662 return ERR_PTR(-EBADF);
2663 }
2664
2665 return file->private_data;
2666}
2667
2668static int perf_event_set_output(struct perf_event *event,
2669 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08002670static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002671
2672static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2673{
2674 struct perf_event *event = file->private_data;
2675 void (*func)(struct perf_event *);
2676 u32 flags = arg;
2677
2678 switch (cmd) {
2679 case PERF_EVENT_IOC_ENABLE:
2680 func = perf_event_enable;
2681 break;
2682 case PERF_EVENT_IOC_DISABLE:
2683 func = perf_event_disable;
2684 break;
2685 case PERF_EVENT_IOC_RESET:
2686 func = perf_event_reset;
2687 break;
2688
2689 case PERF_EVENT_IOC_REFRESH:
2690 return perf_event_refresh(event, arg);
2691
2692 case PERF_EVENT_IOC_PERIOD:
2693 return perf_event_period(event, (u64 __user *)arg);
2694
2695 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002696 {
2697 struct perf_event *output_event = NULL;
2698 int fput_needed = 0;
2699 int ret;
2700
2701 if (arg != -1) {
2702 output_event = perf_fget_light(arg, &fput_needed);
2703 if (IS_ERR(output_event))
2704 return PTR_ERR(output_event);
2705 }
2706
2707 ret = perf_event_set_output(event, output_event);
2708 if (output_event)
2709 fput_light(output_event->filp, fput_needed);
2710
2711 return ret;
2712 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002713
Li Zefan6fb29152009-10-15 11:21:42 +08002714 case PERF_EVENT_IOC_SET_FILTER:
2715 return perf_event_set_filter(event, (void __user *)arg);
2716
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002717 default:
2718 return -ENOTTY;
2719 }
2720
2721 if (flags & PERF_IOC_FLAG_GROUP)
2722 perf_event_for_each(event, func);
2723 else
2724 perf_event_for_each_child(event, func);
2725
2726 return 0;
2727}
2728
2729int perf_event_task_enable(void)
2730{
2731 struct perf_event *event;
2732
2733 mutex_lock(&current->perf_event_mutex);
2734 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2735 perf_event_for_each_child(event, perf_event_enable);
2736 mutex_unlock(&current->perf_event_mutex);
2737
2738 return 0;
2739}
2740
2741int perf_event_task_disable(void)
2742{
2743 struct perf_event *event;
2744
2745 mutex_lock(&current->perf_event_mutex);
2746 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2747 perf_event_for_each_child(event, perf_event_disable);
2748 mutex_unlock(&current->perf_event_mutex);
2749
2750 return 0;
2751}
2752
2753#ifndef PERF_EVENT_INDEX_OFFSET
2754# define PERF_EVENT_INDEX_OFFSET 0
2755#endif
2756
2757static int perf_event_index(struct perf_event *event)
2758{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002759 if (event->hw.state & PERF_HES_STOPPED)
2760 return 0;
2761
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002762 if (event->state != PERF_EVENT_STATE_ACTIVE)
2763 return 0;
2764
2765 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2766}
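/*
 * Example: index 0 in the mmap control page tells userspace, roughly,
 * "do not read the counter directly"; an active event on hardware
 * counter 2 reports 3 here (less any arch PERF_EVENT_INDEX_OFFSET).
 */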
2767
2768/*
 2769 * Callers need to ensure there can be no nesting of this function; otherwise
 2770 * the seqlock logic goes bad. We cannot serialize this because the arch
2771 * code calls this from NMI context.
2772 */
2773void perf_event_update_userpage(struct perf_event *event)
2774{
2775 struct perf_event_mmap_page *userpg;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002776 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002777
2778 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002779 buffer = rcu_dereference(event->buffer);
2780 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002781 goto unlock;
2782
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002783 userpg = buffer->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002784
2785 /*
 2786	 * Disable preemption so as not to let the corresponding user-space
2787 * spin too long if we get preempted.
2788 */
2789 preempt_disable();
2790 ++userpg->lock;
2791 barrier();
2792 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002793 userpg->offset = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002794 if (event->state == PERF_EVENT_STATE_ACTIVE)
Peter Zijlstrae7850592010-05-21 14:43:08 +02002795 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002796
2797 userpg->time_enabled = event->total_time_enabled +
2798 atomic64_read(&event->child_total_time_enabled);
2799
2800 userpg->time_running = event->total_time_running +
2801 atomic64_read(&event->child_total_time_running);
2802
2803 barrier();
2804 ++userpg->lock;
2805 preempt_enable();
2806unlock:
2807 rcu_read_unlock();
2808}
2809
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002810static unsigned long perf_data_size(struct perf_buffer *buffer);
2811
2812static void
2813perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2814{
2815 long max_size = perf_data_size(buffer);
2816
2817 if (watermark)
2818 buffer->watermark = min(max_size, watermark);
2819
2820 if (!buffer->watermark)
2821 buffer->watermark = max_size / 2;
2822
2823 if (flags & PERF_BUFFER_WRITABLE)
2824 buffer->writable = 1;
2825
2826 atomic_set(&buffer->refcount, 1);
2827}
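/*
 * Example (assuming 4 KiB pages): a buffer with 8 data pages has
 * perf_data_size() == 32768, so with no explicit watermark wakeups
 * fire after roughly every 16384 bytes, i.e. half the buffer.
 */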
2828
Peter Zijlstra906010b2009-09-21 16:08:49 +02002829#ifndef CONFIG_PERF_USE_VMALLOC
2830
2831/*
2832 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2833 */
2834
2835static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002836perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002837{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002838 if (pgoff > buffer->nr_pages)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002839 return NULL;
2840
2841 if (pgoff == 0)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002842 return virt_to_page(buffer->user_page);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002843
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002844 return virt_to_page(buffer->data_pages[pgoff - 1]);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002845}
2846
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002847static void *perf_mmap_alloc_page(int cpu)
2848{
2849 struct page *page;
2850 int node;
2851
2852 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2853 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2854 if (!page)
2855 return NULL;
2856
2857 return page_address(page);
2858}
2859
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002860static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002861perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002862{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002863 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002864 unsigned long size;
2865 int i;
2866
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002867 size = sizeof(struct perf_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002868 size += nr_pages * sizeof(void *);
2869
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002870 buffer = kzalloc(size, GFP_KERNEL);
2871 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002872 goto fail;
2873
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002874 buffer->user_page = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002875 if (!buffer->user_page)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002876 goto fail_user_page;
2877
2878 for (i = 0; i < nr_pages; i++) {
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002879 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002880 if (!buffer->data_pages[i])
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002881 goto fail_data_pages;
2882 }
2883
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002884 buffer->nr_pages = nr_pages;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002885
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002886 perf_buffer_init(buffer, watermark, flags);
2887
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002888 return buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002889
2890fail_data_pages:
2891 for (i--; i >= 0; i--)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002892 free_page((unsigned long)buffer->data_pages[i]);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002893
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002894 free_page((unsigned long)buffer->user_page);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002895
2896fail_user_page:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002897 kfree(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002898
2899fail:
Peter Zijlstra906010b2009-09-21 16:08:49 +02002900 return NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002901}
2902
2903static void perf_mmap_free_page(unsigned long addr)
2904{
2905 struct page *page = virt_to_page((void *)addr);
2906
2907 page->mapping = NULL;
2908 __free_page(page);
2909}
2910
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002911static void perf_buffer_free(struct perf_buffer *buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002912{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002913 int i;
2914
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002915 perf_mmap_free_page((unsigned long)buffer->user_page);
2916 for (i = 0; i < buffer->nr_pages; i++)
2917 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2918 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002919}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002920
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002921static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002922{
2923 return 0;
2924}
2925
Peter Zijlstra906010b2009-09-21 16:08:49 +02002926#else
2927
2928/*
2929 * Back perf_mmap() with vmalloc memory.
2930 *
2931 * Required for architectures that have d-cache aliasing issues.
2932 */
2933
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002934static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002935{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002936 return buffer->page_order;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002937}
2938
Peter Zijlstra906010b2009-09-21 16:08:49 +02002939static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002940perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002941{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002942 if (pgoff > (1UL << page_order(buffer)))
Peter Zijlstra906010b2009-09-21 16:08:49 +02002943 return NULL;
2944
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002945 return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002946}
2947
2948static void perf_mmap_unmark_page(void *addr)
2949{
2950 struct page *page = vmalloc_to_page(addr);
2951
2952 page->mapping = NULL;
2953}
2954
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002955static void perf_buffer_free_work(struct work_struct *work)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002956{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002957 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002958 void *base;
2959 int i, nr;
2960
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002961 buffer = container_of(work, struct perf_buffer, work);
2962 nr = 1 << page_order(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002963
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002964 base = buffer->user_page;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002965 for (i = 0; i < nr + 1; i++)
2966 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2967
2968 vfree(base);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002969 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002970}
2971
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002972static void perf_buffer_free(struct perf_buffer *buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002973{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002974 schedule_work(&buffer->work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002975}
2976
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002977static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002978perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002979{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002980 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002981 unsigned long size;
2982 void *all_buf;
2983
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002984 size = sizeof(struct perf_buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002985 size += sizeof(void *);
2986
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002987 buffer = kzalloc(size, GFP_KERNEL);
2988 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002989 goto fail;
2990
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002991 INIT_WORK(&buffer->work, perf_buffer_free_work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002992
2993 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2994 if (!all_buf)
2995 goto fail_all_buf;
2996
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002997 buffer->user_page = all_buf;
2998 buffer->data_pages[0] = all_buf + PAGE_SIZE;
2999 buffer->page_order = ilog2(nr_pages);
3000 buffer->nr_pages = 1;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003001
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003002 perf_buffer_init(buffer, watermark, flags);
3003
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003004 return buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003005
3006fail_all_buf:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003007 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003008
3009fail:
3010 return NULL;
3011}
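/*
 * Illustrative sketch, not part of the original file: the vmalloc variant
 * packs the control page and the whole data area into one virtually
 * contiguous allocation, so only data_pages[0] needs recording:
 *
 *   all_buf: [ user page ][ data ........................ data ]
 *             ^ buffer->user_page
 *                          ^ buffer->data_pages[0] = all_buf + PAGE_SIZE
 *
 * nr_pages is forced to 1 and page_order = ilog2(nr_pages), so the single
 * logical data page spans PAGE_SIZE << page_order bytes.
 */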
3012
3013#endif
3014
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003015static unsigned long perf_data_size(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003016{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003017 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003018}
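/*
 * Worked example (numbers assumed): a 32KiB data area with 4KiB pages is
 * either nr_pages = 8, page_order = 0 (page backed) or nr_pages = 1,
 * page_order = 3 (vmalloc backed); both give
 * nr_pages << (PAGE_SHIFT + page_order) = 32768.
 */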
3019
Peter Zijlstra906010b2009-09-21 16:08:49 +02003020static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3021{
3022 struct perf_event *event = vma->vm_file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003023 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003024 int ret = VM_FAULT_SIGBUS;
3025
3026 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3027 if (vmf->pgoff == 0)
3028 ret = 0;
3029 return ret;
3030 }
3031
3032 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003033 buffer = rcu_dereference(event->buffer);
3034 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003035 goto unlock;
3036
3037 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3038 goto unlock;
3039
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003040 vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003041 if (!vmf->page)
3042 goto unlock;
3043
3044 get_page(vmf->page);
3045 vmf->page->mapping = vma->vm_file->f_mapping;
3046 vmf->page->index = vmf->pgoff;
3047
3048 ret = 0;
3049unlock:
3050 rcu_read_unlock();
3051
3052 return ret;
3053}
3054
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003055static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003056{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003057 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003058
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003059 buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
3060 perf_buffer_free(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003061}
3062
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003063static struct perf_buffer *perf_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003064{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003065 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003066
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003067 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003068 buffer = rcu_dereference(event->buffer);
3069 if (buffer) {
3070 if (!atomic_inc_not_zero(&buffer->refcount))
3071 buffer = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003072 }
3073 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003074
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003075 return buffer;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003076}
3077
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003078static void perf_buffer_put(struct perf_buffer *buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003079{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003080 if (!atomic_dec_and_test(&buffer->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003081 return;
3082
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003083 call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003084}
3085
3086static void perf_mmap_open(struct vm_area_struct *vma)
3087{
3088 struct perf_event *event = vma->vm_file->private_data;
3089
3090 atomic_inc(&event->mmap_count);
3091}
3092
3093static void perf_mmap_close(struct vm_area_struct *vma)
3094{
3095 struct perf_event *event = vma->vm_file->private_data;
3096
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003097 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003098 unsigned long size = perf_data_size(event->buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003099 struct user_struct *user = event->mmap_user;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003100 struct perf_buffer *buffer = event->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003101
Peter Zijlstra906010b2009-09-21 16:08:49 +02003102 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003103 vma->vm_mm->locked_vm -= event->mmap_locked;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003104 rcu_assign_pointer(event->buffer, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003105 mutex_unlock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003106
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003107 perf_buffer_put(buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003108 free_uid(user);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003109 }
3110}
3111
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003112static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003113 .open = perf_mmap_open,
3114 .close = perf_mmap_close,
3115 .fault = perf_mmap_fault,
3116 .page_mkwrite = perf_mmap_fault,
3117};
3118
3119static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3120{
3121 struct perf_event *event = file->private_data;
3122 unsigned long user_locked, user_lock_limit;
3123 struct user_struct *user = current_user();
3124 unsigned long locked, lock_limit;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003125 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003126 unsigned long vma_size;
3127 unsigned long nr_pages;
3128 long user_extra, extra;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003129 int ret = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003130
Peter Zijlstrac7920612010-05-18 10:33:24 +02003131 /*
3132 * Don't allow mmap() of inherited per-task counters. This would
3133 * create a performance issue due to all children writing to the
3134 * same buffer.
3135 */
3136 if (event->cpu == -1 && event->attr.inherit)
3137 return -EINVAL;
3138
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003139 if (!(vma->vm_flags & VM_SHARED))
3140 return -EINVAL;
3141
3142 vma_size = vma->vm_end - vma->vm_start;
3143 nr_pages = (vma_size / PAGE_SIZE) - 1;
3144
3145 /*
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003146 * If we have buffer pages, ensure their count is a power of two, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003147 * can do bitmasks instead of modulo.
3148 */
3149 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3150 return -EINVAL;
3151
3152 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3153 return -EINVAL;
3154
3155 if (vma->vm_pgoff != 0)
3156 return -EINVAL;
3157
3158 WARN_ON_ONCE(event->ctx->parent_ctx);
3159 mutex_lock(&event->mmap_mutex);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003160 if (event->buffer) {
3161 if (event->buffer->nr_pages == nr_pages)
3162 atomic_inc(&event->buffer->refcount);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003163 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003164 ret = -EINVAL;
3165 goto unlock;
3166 }
3167
3168 user_extra = nr_pages + 1;
3169 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3170
3171 /*
3172 * Increase the limit linearly with more CPUs:
3173 */
3174 user_lock_limit *= num_online_cpus();
3175
3176 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3177
3178 extra = 0;
3179 if (user_locked > user_lock_limit)
3180 extra = user_locked - user_lock_limit;
3181
Jiri Slaby78d7d402010-03-05 13:42:54 -08003182 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003183 lock_limit >>= PAGE_SHIFT;
3184 locked = vma->vm_mm->locked_vm + extra;
3185
3186 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3187 !capable(CAP_IPC_LOCK)) {
3188 ret = -EPERM;
3189 goto unlock;
3190 }
3191
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003192 WARN_ON(event->buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003193
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003194 if (vma->vm_flags & VM_WRITE)
3195 flags |= PERF_BUFFER_WRITABLE;
3196
3197 buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
3198 event->cpu, flags);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003199 if (!buffer) {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003200 ret = -ENOMEM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003201 goto unlock;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003202 }
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003203 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003204
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003205 atomic_long_add(user_extra, &user->locked_vm);
3206 event->mmap_locked = extra;
3207 event->mmap_user = get_current_user();
3208 vma->vm_mm->locked_vm += event->mmap_locked;
3209
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003210unlock:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003211 if (!ret)
3212 atomic_inc(&event->mmap_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003213 mutex_unlock(&event->mmap_mutex);
3214
3215 vma->vm_flags |= VM_RESERVED;
3216 vma->vm_ops = &perf_mmap_vmops;
3217
3218 return ret;
3219}
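/*
 * Worked example of the accounting above (numbers assumed): mapping
 * 1 + 8 pages gives user_extra = 9.  With the default
 * sysctl_perf_event_mlock of 512 (KiB) and 4KiB pages, user_lock_limit
 * is 128 pages, scaled to 512 pages on a 4-CPU machine.  Pages past the
 * per-user budget become "extra" and are charged against the mm's
 * RLIMIT_MEMLOCK instead; -EPERM results only when that limit is also
 * exceeded, paranoia is enabled (perf_event_paranoid > -1) and the
 * caller lacks CAP_IPC_LOCK.
 */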
3220
3221static int perf_fasync(int fd, struct file *filp, int on)
3222{
3223 struct inode *inode = filp->f_path.dentry->d_inode;
3224 struct perf_event *event = filp->private_data;
3225 int retval;
3226
3227 mutex_lock(&inode->i_mutex);
3228 retval = fasync_helper(fd, filp, on, &event->fasync);
3229 mutex_unlock(&inode->i_mutex);
3230
3231 if (retval < 0)
3232 return retval;
3233
3234 return 0;
3235}
3236
3237static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01003238 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003239 .release = perf_release,
3240 .read = perf_read,
3241 .poll = perf_poll,
3242 .unlocked_ioctl = perf_ioctl,
3243 .compat_ioctl = perf_ioctl,
3244 .mmap = perf_mmap,
3245 .fasync = perf_fasync,
3246};
3247
3248/*
3249 * Perf event wakeup
3250 *
3251 * If there's data, ensure we set the poll() state and publish everything
3252 * to user-space before waking everybody up.
3253 */
3254
3255void perf_event_wakeup(struct perf_event *event)
3256{
3257 wake_up_all(&event->waitq);
3258
3259 if (event->pending_kill) {
3260 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3261 event->pending_kill = 0;
3262 }
3263}
3264
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003265static void perf_pending_event(struct irq_work *entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003266{
3267 struct perf_event *event = container_of(entry,
3268 struct perf_event, pending);
3269
3270 if (event->pending_disable) {
3271 event->pending_disable = 0;
3272 __perf_event_disable(event);
3273 }
3274
3275 if (event->pending_wakeup) {
3276 event->pending_wakeup = 0;
3277 perf_event_wakeup(event);
3278 }
3279}
3280
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003281/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08003282 * For now we assume KVM is the only user of these callbacks.
3283 * Later on, we might change this to a list if another
3284 * virtualization implementation also needs to register callbacks.
3285 */
3286struct perf_guest_info_callbacks *perf_guest_cbs;
3287
3288int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3289{
3290 perf_guest_cbs = cbs;
3291 return 0;
3292}
3293EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3294
3295int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3296{
3297 perf_guest_cbs = NULL;
3298 return 0;
3299}
3300EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3301
3302/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003303 * Output
3304 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003305static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003306 unsigned long offset, unsigned long head)
3307{
3308 unsigned long mask;
3309
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003310 if (!buffer->writable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003311 return true;
3312
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003313 mask = perf_data_size(buffer) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003314
3315 offset = (offset - tail) & mask;
3316 head = (head - tail) & mask;
3317
3318 if ((int)(head - offset) < 0)
3319 return false;
3320
3321 return true;
3322}
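/*
 * Worked example (numbers assumed): a writable 16-page (64KiB) buffer has
 * mask = 0xffff.  With tail = 0x1000 and offset = 0xf000, a reservation
 * that moves head to 0x11000 yields, relative to tail, offset = 0xe000
 * and head = 0x0000 after masking, so (int)(head - offset) < 0: the
 * write would clobber unconsumed data and is refused.
 */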
3323
3324static void perf_output_wakeup(struct perf_output_handle *handle)
3325{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003326 atomic_set(&handle->buffer->poll, POLL_IN);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003327
3328 if (handle->nmi) {
3329 handle->event->pending_wakeup = 1;
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003330 irq_work_queue(&handle->event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003331 } else
3332 perf_event_wakeup(handle->event);
3333}
3334
3335/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003336 * We need to ensure a later event_id doesn't publish a head when a former
Peter Zijlstraef607772010-05-18 10:50:41 +02003337 * event isn't done writing. However, since we need to deal with NMIs, we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003338 * cannot fully serialize things.
3339 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003340 * We only publish the head (and generate a wakeup) when the outer-most
Peter Zijlstraef607772010-05-18 10:50:41 +02003341 * event completes.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003342 */
Peter Zijlstraef607772010-05-18 10:50:41 +02003343static void perf_output_get_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003344{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003345 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003346
Peter Zijlstraef607772010-05-18 10:50:41 +02003347 preempt_disable();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003348 local_inc(&buffer->nest);
3349 handle->wakeup = local_read(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003350}
3351
Peter Zijlstraef607772010-05-18 10:50:41 +02003352static void perf_output_put_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003353{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003354 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003355 unsigned long head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003356
3357again:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003358 head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003359
3360 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003361 * IRQ/NMI can happen here, which means we can miss a head update.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003362 */
3363
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003364 if (!local_dec_and_test(&buffer->nest))
Frederic Weisbeckeracd35a42010-05-20 21:28:34 +02003365 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003366
3367 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003368 * Publish the known good head. Rely on the full barrier implied
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003369 * by local_dec_and_test() to order the buffer->head read and this
Peter Zijlstraef607772010-05-18 10:50:41 +02003370 * write.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003371 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003372 buffer->user_page->data_head = head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003373
Peter Zijlstraef607772010-05-18 10:50:41 +02003374 /*
3375 * Now check if we missed an update; rely on the (compiler)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003376 * barrier in local_dec_and_test() to re-read buffer->head.
Peter Zijlstraef607772010-05-18 10:50:41 +02003377 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003378 if (unlikely(head != local_read(&buffer->head))) {
3379 local_inc(&buffer->nest);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003380 goto again;
3381 }
3382
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003383 if (handle->wakeup != local_read(&buffer->wakeup))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003384 perf_output_wakeup(handle);
Peter Zijlstraef607772010-05-18 10:50:41 +02003385
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003386out:
Peter Zijlstraef607772010-05-18 10:50:41 +02003387 preempt_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003388}
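/*
 * A minimal user-space consumer matching the publication protocol above;
 * an illustrative sketch only, where 'base' is assumed to be the address
 * returned by mmap()ing the event fd, mask = data_size - 1, and records
 * that wrap the buffer edge are ignored for brevity:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	char *data = (char *)base + page_size;
 *	__u64 tail = pc->data_tail;
 *	__u64 head = pc->data_head;
 *	rmb();				read records only after data_head
 *	while (tail != head) {
 *		struct perf_event_header *hdr =
 *			(void *)(data + (tail & mask));
 *		... consume the record at hdr ...
 *		tail += hdr->size;
 *	}
 *	mb();				finish reading before freeing space
 *	pc->data_tail = tail;
 *
 * Because the kernel only publishes data_head once the outermost nested
 * writer has finished, the consumer never sees a head that covers
 * partially written records.
 */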
3389
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003390__always_inline void perf_output_copy(struct perf_output_handle *handle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003391 const void *buf, unsigned int len)
3392{
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003393 do {
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003394 unsigned long size = min_t(unsigned long, handle->size, len);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003395
3396 memcpy(handle->addr, buf, size);
3397
3398 len -= size;
3399 handle->addr += size;
Frederic Weisbecker74048f82010-05-27 21:34:58 +02003400 buf += size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003401 handle->size -= size;
3402 if (!handle->size) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003403 struct perf_buffer *buffer = handle->buffer;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003404
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003405 handle->page++;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003406 handle->page &= buffer->nr_pages - 1;
3407 handle->addr = buffer->data_pages[handle->page];
3408 handle->size = PAGE_SIZE << page_order(buffer);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003409 }
3410 } while (len);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003411}
3412
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003413static void __perf_event_header__init_id(struct perf_event_header *header,
3414 struct perf_sample_data *data,
3415 struct perf_event *event)
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02003416{
3417 u64 sample_type = event->attr.sample_type;
3418
3419 data->type = sample_type;
3420 header->size += event->id_header_size;
3421
3422 if (sample_type & PERF_SAMPLE_TID) {
3423 /* namespace issues */
3424 data->tid_entry.pid = perf_event_pid(event, current);
3425 data->tid_entry.tid = perf_event_tid(event, current);
3426 }
3427
3428 if (sample_type & PERF_SAMPLE_TIME)
3429 data->time = perf_clock();
3430
3431 if (sample_type & PERF_SAMPLE_ID)
3432 data->id = primary_event_id(event);
3433
3434 if (sample_type & PERF_SAMPLE_STREAM_ID)
3435 data->stream_id = event->id;
3436
3437 if (sample_type & PERF_SAMPLE_CPU) {
3438 data->cpu_entry.cpu = raw_smp_processor_id();
3439 data->cpu_entry.reserved = 0;
3440 }
3441}
3442
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003443static void perf_event_header__init_id(struct perf_event_header *header,
3444 struct perf_sample_data *data,
3445 struct perf_event *event)
3446{
3447 if (event->attr.sample_id_all)
3448 __perf_event_header__init_id(header, data, event);
3449}
3450
3451static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3452 struct perf_sample_data *data)
3453{
3454 u64 sample_type = data->type;
3455
3456 if (sample_type & PERF_SAMPLE_TID)
3457 perf_output_put(handle, data->tid_entry);
3458
3459 if (sample_type & PERF_SAMPLE_TIME)
3460 perf_output_put(handle, data->time);
3461
3462 if (sample_type & PERF_SAMPLE_ID)
3463 perf_output_put(handle, data->id);
3464
3465 if (sample_type & PERF_SAMPLE_STREAM_ID)
3466 perf_output_put(handle, data->stream_id);
3467
3468 if (sample_type & PERF_SAMPLE_CPU)
3469 perf_output_put(handle, data->cpu_entry);
3470}
3471
3472static void perf_event__output_id_sample(struct perf_event *event,
3473 struct perf_output_handle *handle,
3474 struct perf_sample_data *sample)
3475{
3476 if (event->attr.sample_id_all)
3477 __perf_event__output_id_sample(handle, sample);
3478}
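/*
 * With attr.sample_id_all set, every non-SAMPLE record grows a trailer
 * whose fields appear in the fixed order emitted above.  Example layout
 * (sample_type assumed) for PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_CPU:
 *
 *   { u32 pid, tid; } { u64 time; } { u32 cpu, reserved; }
 *
 * header->size was already grown by event->id_header_size to cover this.
 */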
3479
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003480int perf_output_begin(struct perf_output_handle *handle,
3481 struct perf_event *event, unsigned int size,
3482 int nmi, int sample)
3483{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003484 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003485 unsigned long tail, offset, head;
3486 int have_lost;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003487 struct perf_sample_data sample_data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003488 struct {
3489 struct perf_event_header header;
3490 u64 id;
3491 u64 lost;
3492 } lost_event;
3493
3494 rcu_read_lock();
3495 /*
3496 * For inherited events we send all the output towards the parent.
3497 */
3498 if (event->parent)
3499 event = event->parent;
3500
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003501 buffer = rcu_dereference(event->buffer);
3502 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003503 goto out;
3504
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003505 handle->buffer = buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003506 handle->event = event;
3507 handle->nmi = nmi;
3508 handle->sample = sample;
3509
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003510 if (!buffer->nr_pages)
Stephane Eranian00d1d0b2010-05-17 12:46:01 +02003511 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003512
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003513 have_lost = local_read(&buffer->lost);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003514 if (have_lost) {
3515 lost_event.header.size = sizeof(lost_event);
3516 perf_event_header__init_id(&lost_event.header, &sample_data,
3517 event);
3518 size += lost_event.header.size;
3519 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003520
Peter Zijlstraef607772010-05-18 10:50:41 +02003521 perf_output_get_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003522
3523 do {
3524 /*
3525 * Userspace could choose to issue a mb() before updating the
3526 * tail pointer, so that all reads will be completed before the
3527 * write is issued.
3528 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003529 tail = ACCESS_ONCE(buffer->user_page->data_tail);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003530 smp_rmb();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003531 offset = head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003532 head += size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003533 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003534 goto fail;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003535 } while (local_cmpxchg(&buffer->head, offset, head) != offset);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003536
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003537 if (head - local_read(&buffer->wakeup) > buffer->watermark)
3538 local_add(buffer->watermark, &buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003539
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003540 handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3541 handle->page &= buffer->nr_pages - 1;
3542 handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3543 handle->addr = buffer->data_pages[handle->page];
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003544 handle->addr += handle->size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003545 handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003546
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003547 if (have_lost) {
3548 lost_event.header.type = PERF_RECORD_LOST;
3549 lost_event.header.misc = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003550 lost_event.id = event->id;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003551 lost_event.lost = local_xchg(&buffer->lost, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003552
3553 perf_output_put(handle, lost_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003554 perf_event__output_id_sample(event, handle, &sample_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003555 }
3556
3557 return 0;
3558
3559fail:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003560 local_inc(&buffer->lost);
Peter Zijlstraef607772010-05-18 10:50:41 +02003561 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003562out:
3563 rcu_read_unlock();
3564
3565 return -ENOSPC;
3566}
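/*
 * The reservation loop above is lock-free: concurrent writers race via
 * local_cmpxchg() on buffer->head.  Illustrative interleaving (sizes
 * assumed): writers A (24 bytes) and B (40 bytes) both read
 * offset = 0x100; A's cmpxchg(0x100 -> 0x118) wins, B's fails, and B
 * retries with offset = 0x118, reserving [0x118, 0x140).  Each writer
 * then fills its own disjoint region before perf_output_put_handle()
 * publishes the new head.
 */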
3567
3568void perf_output_end(struct perf_output_handle *handle)
3569{
3570 struct perf_event *event = handle->event;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003571 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003572
3573 int wakeup_events = event->attr.wakeup_events;
3574
3575 if (handle->sample && wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003576 int events = local_inc_return(&buffer->events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003577 if (events >= wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003578 local_sub(wakeup_events, &buffer->events);
3579 local_inc(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003580 }
3581 }
3582
Peter Zijlstraef607772010-05-18 10:50:41 +02003583 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003584 rcu_read_unlock();
3585}
3586
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003587static void perf_output_read_one(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003588 struct perf_event *event,
3589 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003590{
3591 u64 read_format = event->attr.read_format;
3592 u64 values[4];
3593 int n = 0;
3594
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003595 values[n++] = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003596 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003597 values[n++] = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003598 atomic64_read(&event->child_total_time_enabled);
3599 }
3600 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003601 values[n++] = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003602 atomic64_read(&event->child_total_time_running);
3603 }
3604 if (read_format & PERF_FORMAT_ID)
3605 values[n++] = primary_event_id(event);
3606
3607 perf_output_copy(handle, values, n * sizeof(u64));
3608}
3609
3610/*
3611 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3612 */
3613static void perf_output_read_group(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003614 struct perf_event *event,
3615 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003616{
3617 struct perf_event *leader = event->group_leader, *sub;
3618 u64 read_format = event->attr.read_format;
3619 u64 values[5];
3620 int n = 0;
3621
3622 values[n++] = 1 + leader->nr_siblings;
3623
3624 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
Stephane Eranianeed01522010-10-26 16:08:01 +02003625 values[n++] = enabled;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003626
3627 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
Stephane Eranianeed01522010-10-26 16:08:01 +02003628 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003629
3630 if (leader != event)
3631 leader->pmu->read(leader);
3632
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003633 values[n++] = perf_event_count(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003634 if (read_format & PERF_FORMAT_ID)
3635 values[n++] = primary_event_id(leader);
3636
3637 perf_output_copy(handle, values, n * sizeof(u64));
3638
3639 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3640 n = 0;
3641
3642 if (sub != event)
3643 sub->pmu->read(sub);
3644
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003645 values[n++] = perf_event_count(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003646 if (read_format & PERF_FORMAT_ID)
3647 values[n++] = primary_event_id(sub);
3648
3649 perf_output_copy(handle, values, n * sizeof(u64));
3650 }
3651}
3652
Stephane Eranianeed01522010-10-26 16:08:01 +02003653#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3654 PERF_FORMAT_TOTAL_TIME_RUNNING)
3655
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003656static void perf_output_read(struct perf_output_handle *handle,
3657 struct perf_event *event)
3658{
Stephane Eranianeed01522010-10-26 16:08:01 +02003659 u64 enabled = 0, running = 0, now, ctx_time;
3660 u64 read_format = event->attr.read_format;
3661
3662 /*
3663 * compute total_time_enabled, total_time_running
3664 * based on snapshot values taken when the event
3665 * was last scheduled in.
3666 *
3667 * we cannot simply call update_context_time(),
3668 * because of locking issues: we may be called in
3669 * NMI context
3670 */
3671 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3672 now = perf_clock();
3673 ctx_time = event->shadow_ctx_time + now;
3674 enabled = ctx_time - event->tstamp_enabled;
3675 running = ctx_time - event->tstamp_running;
3676 }
3677
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003678 if (event->attr.read_format & PERF_FORMAT_GROUP)
Stephane Eranianeed01522010-10-26 16:08:01 +02003679 perf_output_read_group(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003680 else
Stephane Eranianeed01522010-10-26 16:08:01 +02003681 perf_output_read_one(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003682}
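/*
 * Example of what perf_output_read_one() emits (read_format assumed):
 * for read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID the
 * handle receives three u64 values, in order:
 *
 *   { value; time_enabled; id; }
 *
 * mirroring what a read() on the event fd returns for that format.
 */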
3683
3684void perf_output_sample(struct perf_output_handle *handle,
3685 struct perf_event_header *header,
3686 struct perf_sample_data *data,
3687 struct perf_event *event)
3688{
3689 u64 sample_type = data->type;
3690
3691 perf_output_put(handle, *header);
3692
3693 if (sample_type & PERF_SAMPLE_IP)
3694 perf_output_put(handle, data->ip);
3695
3696 if (sample_type & PERF_SAMPLE_TID)
3697 perf_output_put(handle, data->tid_entry);
3698
3699 if (sample_type & PERF_SAMPLE_TIME)
3700 perf_output_put(handle, data->time);
3701
3702 if (sample_type & PERF_SAMPLE_ADDR)
3703 perf_output_put(handle, data->addr);
3704
3705 if (sample_type & PERF_SAMPLE_ID)
3706 perf_output_put(handle, data->id);
3707
3708 if (sample_type & PERF_SAMPLE_STREAM_ID)
3709 perf_output_put(handle, data->stream_id);
3710
3711 if (sample_type & PERF_SAMPLE_CPU)
3712 perf_output_put(handle, data->cpu_entry);
3713
3714 if (sample_type & PERF_SAMPLE_PERIOD)
3715 perf_output_put(handle, data->period);
3716
3717 if (sample_type & PERF_SAMPLE_READ)
3718 perf_output_read(handle, event);
3719
3720 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3721 if (data->callchain) {
3722 int size = 1;
3723
3724 size += data->callchain->nr;
3726
3727 size *= sizeof(u64);
3728
3729 perf_output_copy(handle, data->callchain, size);
3730 } else {
3731 u64 nr = 0;
3732 perf_output_put(handle, nr);
3733 }
3734 }
3735
3736 if (sample_type & PERF_SAMPLE_RAW) {
3737 if (data->raw) {
3738 perf_output_put(handle, data->raw->size);
3739 perf_output_copy(handle, data->raw->data,
3740 data->raw->size);
3741 } else {
3742 struct {
3743 u32 size;
3744 u32 data;
3745 } raw = {
3746 .size = sizeof(u32),
3747 .data = 0,
3748 };
3749 perf_output_put(handle, raw);
3750 }
3751 }
3752}
3753
3754void perf_prepare_sample(struct perf_event_header *header,
3755 struct perf_sample_data *data,
3756 struct perf_event *event,
3757 struct pt_regs *regs)
3758{
3759 u64 sample_type = event->attr.sample_type;
3760
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003761 header->type = PERF_RECORD_SAMPLE;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003762 header->size = sizeof(*header) + event->header_size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003763
3764 header->misc = 0;
3765 header->misc |= perf_misc_flags(regs);
3766
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003767 __perf_event_header__init_id(header, data, event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02003768
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003769 if (sample_type & PERF_SAMPLE_IP)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003770 data->ip = perf_instruction_pointer(regs);
3771
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003772 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3773 int size = 1;
3774
3775 data->callchain = perf_callchain(regs);
3776
3777 if (data->callchain)
3778 size += data->callchain->nr;
3779
3780 header->size += size * sizeof(u64);
3781 }
3782
3783 if (sample_type & PERF_SAMPLE_RAW) {
3784 int size = sizeof(u32);
3785
3786 if (data->raw)
3787 size += data->raw->size;
3788 else
3789 size += sizeof(u32);
3790
3791 WARN_ON_ONCE(size & (sizeof(u64)-1));
3792 header->size += size;
3793 }
3794}
3795
3796static void perf_event_output(struct perf_event *event, int nmi,
3797 struct perf_sample_data *data,
3798 struct pt_regs *regs)
3799{
3800 struct perf_output_handle handle;
3801 struct perf_event_header header;
3802
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003803 /* protect the callchain buffers */
3804 rcu_read_lock();
3805
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003806 perf_prepare_sample(&header, data, event, regs);
3807
3808 if (perf_output_begin(&handle, event, header.size, nmi, 1))
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003809 goto exit;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003810
3811 perf_output_sample(&handle, &header, data, event);
3812
3813 perf_output_end(&handle);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003814
3815exit:
3816 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003817}
3818
3819/*
3820 * read event_id
3821 */
3822
3823struct perf_read_event {
3824 struct perf_event_header header;
3825
3826 u32 pid;
3827 u32 tid;
3828};
3829
3830static void
3831perf_event_read_event(struct perf_event *event,
3832 struct task_struct *task)
3833{
3834 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003835 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003836 struct perf_read_event read_event = {
3837 .header = {
3838 .type = PERF_RECORD_READ,
3839 .misc = 0,
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003840 .size = sizeof(read_event) + event->read_size,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003841 },
3842 .pid = perf_event_pid(event, task),
3843 .tid = perf_event_tid(event, task),
3844 };
3845 int ret;
3846
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003847 perf_event_header__init_id(&read_event.header, &sample, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003848 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3849 if (ret)
3850 return;
3851
3852 perf_output_put(&handle, read_event);
3853 perf_output_read(&handle, event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003854 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003855
3856 perf_output_end(&handle);
3857}
3858
3859/*
3860 * task tracking -- fork/exit
3861 *
Eric B Munson3af9e852010-05-18 15:30:49 +01003862 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003863 */
3864
3865struct perf_task_event {
3866 struct task_struct *task;
3867 struct perf_event_context *task_ctx;
3868
3869 struct {
3870 struct perf_event_header header;
3871
3872 u32 pid;
3873 u32 ppid;
3874 u32 tid;
3875 u32 ptid;
3876 u64 time;
3877 } event_id;
3878};
3879
3880static void perf_event_task_output(struct perf_event *event,
3881 struct perf_task_event *task_event)
3882{
3883 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003884 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003885 struct task_struct *task = task_event->task;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003886 int ret, size = task_event->event_id.header.size;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01003887
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003888 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003889
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003890 ret = perf_output_begin(&handle, event,
3891 task_event->event_id.header.size, 0, 0);
Peter Zijlstraef607772010-05-18 10:50:41 +02003892 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003893 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003894
3895 task_event->event_id.pid = perf_event_pid(event, task);
3896 task_event->event_id.ppid = perf_event_pid(event, current);
3897
3898 task_event->event_id.tid = perf_event_tid(event, task);
3899 task_event->event_id.ptid = perf_event_tid(event, current);
3900
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003901 perf_output_put(&handle, task_event->event_id);
3902
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003903 perf_event__output_id_sample(event, &handle, &sample);
3904
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003905 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003906out:
3907 task_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003908}
3909
3910static int perf_event_task_match(struct perf_event *event)
3911{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003912 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003913 return 0;
3914
Stephane Eranian5632ab12011-01-03 18:20:01 +02003915 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003916 return 0;
3917
Eric B Munson3af9e852010-05-18 15:30:49 +01003918 if (event->attr.comm || event->attr.mmap ||
3919 event->attr.mmap_data || event->attr.task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003920 return 1;
3921
3922 return 0;
3923}
3924
3925static void perf_event_task_ctx(struct perf_event_context *ctx,
3926 struct perf_task_event *task_event)
3927{
3928 struct perf_event *event;
3929
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003930 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3931 if (perf_event_task_match(event))
3932 perf_event_task_output(event, task_event);
3933 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003934}
3935
3936static void perf_event_task_event(struct perf_task_event *task_event)
3937{
3938 struct perf_cpu_context *cpuctx;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003939 struct perf_event_context *ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003940 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003941 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003942
Peter Zijlstrad6ff86c2009-11-20 22:19:46 +01003943 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003944 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02003945 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01003946 if (cpuctx->active_pmu != pmu)
3947 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003948 perf_event_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003949
3950 ctx = task_event->task_ctx;
3951 if (!ctx) {
3952 ctxn = pmu->task_ctx_nr;
3953 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02003954 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02003955 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3956 }
3957 if (ctx)
3958 perf_event_task_ctx(ctx, task_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02003959next:
3960 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003961 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003962 rcu_read_unlock();
3963}
3964
3965static void perf_event_task(struct task_struct *task,
3966 struct perf_event_context *task_ctx,
3967 int new)
3968{
3969 struct perf_task_event task_event;
3970
3971 if (!atomic_read(&nr_comm_events) &&
3972 !atomic_read(&nr_mmap_events) &&
3973 !atomic_read(&nr_task_events))
3974 return;
3975
3976 task_event = (struct perf_task_event){
3977 .task = task,
3978 .task_ctx = task_ctx,
3979 .event_id = {
3980 .header = {
3981 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3982 .misc = 0,
3983 .size = sizeof(task_event.event_id),
3984 },
3985 /* .pid */
3986 /* .ppid */
3987 /* .tid */
3988 /* .ptid */
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003989 .time = perf_clock(),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003990 },
3991 };
3992
3993 perf_event_task_event(&task_event);
3994}
3995
3996void perf_event_fork(struct task_struct *task)
3997{
3998 perf_event_task(task, NULL, 1);
3999}
4000
4001/*
4002 * comm tracking
4003 */
4004
4005struct perf_comm_event {
4006 struct task_struct *task;
4007 char *comm;
4008 int comm_size;
4009
4010 struct {
4011 struct perf_event_header header;
4012
4013 u32 pid;
4014 u32 tid;
4015 } event_id;
4016};
4017
4018static void perf_event_comm_output(struct perf_event *event,
4019 struct perf_comm_event *comm_event)
4020{
4021 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004022 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004023 int size = comm_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004024 int ret;
4025
4026 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4027 ret = perf_output_begin(&handle, event,
4028 comm_event->event_id.header.size, 0, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004029
4030 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004031 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004032
4033 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4034 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4035
4036 perf_output_put(&handle, comm_event->event_id);
4037 perf_output_copy(&handle, comm_event->comm,
4038 comm_event->comm_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004039
4040 perf_event__output_id_sample(event, &handle, &sample);
4041
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004042 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004043out:
4044 comm_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004045}
4046
4047static int perf_event_comm_match(struct perf_event *event)
4048{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004049 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004050 return 0;
4051
Stephane Eranian5632ab12011-01-03 18:20:01 +02004052 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004053 return 0;
4054
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004055 if (event->attr.comm)
4056 return 1;
4057
4058 return 0;
4059}
4060
4061static void perf_event_comm_ctx(struct perf_event_context *ctx,
4062 struct perf_comm_event *comm_event)
4063{
4064 struct perf_event *event;
4065
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004066 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4067 if (perf_event_comm_match(event))
4068 perf_event_comm_output(event, comm_event);
4069 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004070}
4071
4072static void perf_event_comm_event(struct perf_comm_event *comm_event)
4073{
4074 struct perf_cpu_context *cpuctx;
4075 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004076 char comm[TASK_COMM_LEN];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004077 unsigned int size;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004078 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004079 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004080
4081 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01004082 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004083 size = ALIGN(strlen(comm)+1, sizeof(u64));
4084
4085 comm_event->comm = comm;
4086 comm_event->comm_size = size;
4087
4088 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
Peter Zijlstraf6595f32009-11-20 22:19:47 +01004089 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004090 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004091 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004092 if (cpuctx->active_pmu != pmu)
4093 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004094 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004095
4096 ctxn = pmu->task_ctx_nr;
4097 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004098 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004099
4100 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4101 if (ctx)
4102 perf_event_comm_ctx(ctx, comm_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02004103next:
4104 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004105 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004106 rcu_read_unlock();
4107}
4108
4109void perf_event_comm(struct task_struct *task)
4110{
4111 struct perf_comm_event comm_event;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004112 struct perf_event_context *ctx;
4113 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004114
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004115 for_each_task_context_nr(ctxn) {
4116 ctx = task->perf_event_ctxp[ctxn];
4117 if (!ctx)
4118 continue;
4119
4120 perf_event_enable_on_exec(ctx);
4121 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004122
4123 if (!atomic_read(&nr_comm_events))
4124 return;
4125
4126 comm_event = (struct perf_comm_event){
4127 .task = task,
4128 /* .comm */
4129 /* .comm_size */
4130 .event_id = {
4131 .header = {
4132 .type = PERF_RECORD_COMM,
4133 .misc = 0,
4134 /* .size */
4135 },
4136 /* .pid */
4137 /* .tid */
4138 },
4139 };
4140
4141 perf_event_comm_event(&comm_event);
4142}
4143
4144/*
4145 * mmap tracking
4146 */
4147
4148struct perf_mmap_event {
4149 struct vm_area_struct *vma;
4150
4151 const char *file_name;
4152 int file_size;
4153
4154 struct {
4155 struct perf_event_header header;
4156
4157 u32 pid;
4158 u32 tid;
4159 u64 start;
4160 u64 len;
4161 u64 pgoff;
4162 } event_id;
4163};
4164
4165static void perf_event_mmap_output(struct perf_event *event,
4166 struct perf_mmap_event *mmap_event)
4167{
4168 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004169 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004170 int size = mmap_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004171 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004172
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004173 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4174 ret = perf_output_begin(&handle, event,
4175 mmap_event->event_id.header.size, 0, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004176 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004177 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004178
4179 mmap_event->event_id.pid = perf_event_pid(event, current);
4180 mmap_event->event_id.tid = perf_event_tid(event, current);
4181
4182 perf_output_put(&handle, mmap_event->event_id);
4183 perf_output_copy(&handle, mmap_event->file_name,
4184 mmap_event->file_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004185
4186 perf_event__output_id_sample(event, &handle, &sample);
4187
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004188 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004189out:
4190 mmap_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004191}
4192
4193static int perf_event_mmap_match(struct perf_event *event,
Eric B Munson3af9e852010-05-18 15:30:49 +01004194 struct perf_mmap_event *mmap_event,
4195 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004196{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004197 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004198 return 0;
4199
Stephane Eranian5632ab12011-01-03 18:20:01 +02004200 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004201 return 0;
4202
Eric B Munson3af9e852010-05-18 15:30:49 +01004203 if ((!executable && event->attr.mmap_data) ||
4204 (executable && event->attr.mmap))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004205 return 1;
4206
4207 return 0;
4208}
4209
4210static void perf_event_mmap_ctx(struct perf_event_context *ctx,
Eric B Munson3af9e852010-05-18 15:30:49 +01004211 struct perf_mmap_event *mmap_event,
4212 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004213{
4214 struct perf_event *event;
4215
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004216 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Eric B Munson3af9e852010-05-18 15:30:49 +01004217 if (perf_event_mmap_match(event, mmap_event, executable))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004218 perf_event_mmap_output(event, mmap_event);
4219 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004220}
4221
4222static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4223{
4224 struct perf_cpu_context *cpuctx;
4225 struct perf_event_context *ctx;
4226 struct vm_area_struct *vma = mmap_event->vma;
4227 struct file *file = vma->vm_file;
4228 unsigned int size;
4229 char tmp[16];
4230 char *buf = NULL;
4231 const char *name;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004232 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004233 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004234
4235 memset(tmp, 0, sizeof(tmp));
4236
4237 if (file) {
4238 /*
4239 * d_path works from the end of the buffer backwards, so we
4240 * need to add enough zero bytes after the string to handle
4241 * the 64-bit alignment we do later.
4242 */
4243 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4244 if (!buf) {
4245 name = strncpy(tmp, "//enomem", sizeof(tmp));
4246 goto got_name;
4247 }
4248 name = d_path(&file->f_path, buf, PATH_MAX);
4249 if (IS_ERR(name)) {
4250 name = strncpy(tmp, "//toolong", sizeof(tmp));
4251 goto got_name;
4252 }
4253 } else {
4254 if (arch_vma_name(mmap_event->vma)) {
4255 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4256 sizeof(tmp));
4257 goto got_name;
4258 }
4259
4260 if (!vma->vm_mm) {
4261 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4262 goto got_name;
Eric B Munson3af9e852010-05-18 15:30:49 +01004263 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4264 vma->vm_end >= vma->vm_mm->brk) {
4265 name = strncpy(tmp, "[heap]", sizeof(tmp));
4266 goto got_name;
4267 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4268 vma->vm_end >= vma->vm_mm->start_stack) {
4269 name = strncpy(tmp, "[stack]", sizeof(tmp));
4270 goto got_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004271 }
4272
4273 name = strncpy(tmp, "//anon", sizeof(tmp));
4274 goto got_name;
4275 }
4276
4277got_name:
4278 size = ALIGN(strlen(name)+1, sizeof(u64));
4279
4280 mmap_event->file_name = name;
4281 mmap_event->file_size = size;
4282
4283 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4284
Peter Zijlstraf6d9dd22009-11-20 22:19:48 +01004285 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004286 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004287 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004288 if (cpuctx->active_pmu != pmu)
4289 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004290 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4291 vma->vm_flags & VM_EXEC);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004292
4293 ctxn = pmu->task_ctx_nr;
4294 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004295 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004296
4297 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4298 if (ctx) {
4299 perf_event_mmap_ctx(ctx, mmap_event,
4300 vma->vm_flags & VM_EXEC);
4301 }
Peter Zijlstra41945f62010-09-16 19:17:24 +02004302next:
4303 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004304 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004305 rcu_read_unlock();
4306
4307 kfree(buf);
4308}
4309
Eric B Munson3af9e852010-05-18 15:30:49 +01004310void perf_event_mmap(struct vm_area_struct *vma)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004311{
4312 struct perf_mmap_event mmap_event;
4313
4314 if (!atomic_read(&nr_mmap_events))
4315 return;
4316
4317 mmap_event = (struct perf_mmap_event){
4318 .vma = vma,
4319 /* .file_name */
4320 /* .file_size */
4321 .event_id = {
4322 .header = {
4323 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08004324 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004325 /* .size */
4326 },
4327 /* .pid */
4328 /* .tid */
4329 .start = vma->vm_start,
4330 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01004331 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004332 },
4333 };
4334
4335 perf_event_mmap_event(&mmap_event);
4336}
4337
4338/*
4339 * IRQ throttle logging
4340 */
4341
4342static void perf_log_throttle(struct perf_event *event, int enable)
4343{
4344 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004345 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004346 int ret;
4347
4348 struct {
4349 struct perf_event_header header;
4350 u64 time;
4351 u64 id;
4352 u64 stream_id;
4353 } throttle_event = {
4354 .header = {
4355 .type = PERF_RECORD_THROTTLE,
4356 .misc = 0,
4357 .size = sizeof(throttle_event),
4358 },
4359 .time = perf_clock(),
4360 .id = primary_event_id(event),
4361 .stream_id = event->id,
4362 };
4363
4364 if (enable)
4365 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4366
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004367 perf_event_header__init_id(&throttle_event.header, &sample, event);
4368
4369 ret = perf_output_begin(&handle, event,
4370 throttle_event.header.size, 1, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004371 if (ret)
4372 return;
4373
4374 perf_output_put(&handle, throttle_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004375 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004376 perf_output_end(&handle);
4377}
4378
4379/*
4380 * Generic event overflow handling, sampling.
4381 */
4382
4383static int __perf_event_overflow(struct perf_event *event, int nmi,
4384 int throttle, struct perf_sample_data *data,
4385 struct pt_regs *regs)
4386{
4387 int events = atomic_read(&event->event_limit);
4388 struct hw_perf_event *hwc = &event->hw;
4389 int ret = 0;
4390
Peter Zijlstra96398822010-11-24 18:55:29 +01004391 /*
4392 * Non-sampling counters might still use the PMI to fold short
4393 * hardware counters, ignore those.
4394 */
4395 if (unlikely(!is_sampling_event(event)))
4396 return 0;
4397
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004398 if (!throttle) {
4399 hwc->interrupts++;
4400 } else {
4401 if (hwc->interrupts != MAX_INTERRUPTS) {
4402 hwc->interrupts++;
4403 if (HZ * hwc->interrupts >
4404 (u64)sysctl_perf_event_sample_rate) {
4405 hwc->interrupts = MAX_INTERRUPTS;
4406 perf_log_throttle(event, 0);
4407 ret = 1;
4408 }
4409 } else {
4410 /*
4411				 * Keep re-disabling the event even though we already
4412				 * disabled it on the previous pass - just in case we
4413				 * raced with a sched-in and the event got enabled again:
4414 */
4415 ret = 1;
4416 }
4417 }
4418
4419 if (event->attr.freq) {
4420 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01004421 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004422
Peter Zijlstraabd50712010-01-26 18:50:16 +01004423 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004424
Peter Zijlstraabd50712010-01-26 18:50:16 +01004425 if (delta > 0 && delta < 2*TICK_NSEC)
4426 perf_adjust_period(event, delta, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004427 }
4428
4429 /*
4430 * XXX event_limit might not quite work as expected on inherited
4431 * events
4432 */
4433
4434 event->pending_kill = POLL_IN;
4435 if (events && atomic_dec_and_test(&event->event_limit)) {
4436 ret = 1;
4437 event->pending_kill = POLL_HUP;
4438 if (nmi) {
4439 event->pending_disable = 1;
Peter Zijlstrae360adb2010-10-14 14:01:34 +08004440 irq_work_queue(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004441 } else
4442 perf_event_disable(event);
4443 }
4444
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004445 if (event->overflow_handler)
4446 event->overflow_handler(event, nmi, data, regs);
4447 else
4448 perf_event_output(event, nmi, data, regs);
4449
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004450 return ret;
4451}
4452
4453int perf_event_overflow(struct perf_event *event, int nmi,
4454 struct perf_sample_data *data,
4455 struct pt_regs *regs)
4456{
4457 return __perf_event_overflow(event, nmi, 1, data, regs);
4458}
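/*
 * Editor's note (worked example, not in the original file): the
 * throttle test above fires once HZ * hwc->interrupts exceeds
 * sysctl_perf_event_sample_rate, i.e. it caps each event at roughly
 * sample_rate / HZ PMIs per tick. With HZ == 1000 and the default
 * sysctl_perf_event_sample_rate of 100000:
 *
 *	1000 * 100 = 100000	-> not throttled yet
 *	1000 * 101 = 101000	-> PERF_RECORD_THROTTLE, event muted
 *
 * The tick-time frequency code unthrottles it again (and logs a
 * PERF_RECORD_UNTHROTTLE) on a later timer tick.
 */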
4459
4460/*
4461 * Generic software event infrastructure
4462 */
4463
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004464struct swevent_htable {
4465 struct swevent_hlist *swevent_hlist;
4466 struct mutex hlist_mutex;
4467 int hlist_refcount;
4468
4469	/* Recursion avoidance in each context */
4470 int recursion[PERF_NR_CONTEXTS];
4471};
4472
4473static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4474
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004475/*
4476 * We directly increment event->count and keep a second value in
4477 * event->hw.period_left to count intervals. This period counter
4478 * is kept in the range [-sample_period, 0] so that we can use the
4479 * sign as the trigger.
4480 */
4481
4482static u64 perf_swevent_set_period(struct perf_event *event)
4483{
4484 struct hw_perf_event *hwc = &event->hw;
4485 u64 period = hwc->last_period;
4486 u64 nr, offset;
4487 s64 old, val;
4488
4489 hwc->last_period = hwc->sample_period;
4490
4491again:
Peter Zijlstrae7850592010-05-21 14:43:08 +02004492 old = val = local64_read(&hwc->period_left);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004493 if (val < 0)
4494 return 0;
4495
4496 nr = div64_u64(period + val, period);
4497 offset = nr * period;
4498 val -= offset;
Peter Zijlstrae7850592010-05-21 14:43:08 +02004499 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004500 goto again;
4501
4502 return nr;
4503}
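/*
 * Editor's note: a minimal sketch (not in the original file) of the
 * arithmetic above with the lockless local64 cmpxchg loop stripped
 * out. period_left runs in [-sample_period, 0]; once it goes
 * non-negative, the number of whole periods that elapsed is returned
 * and the remainder is folded back into the counter:
 */
static u64 swevent_periods_elapsed_sketch(s64 *period_left, u64 period)
{
	s64 val = *period_left;
	u64 nr, offset;

	if (val < 0)
		return 0;			/* still inside the period */

	nr = ((u64)val + period) / period;	/* div64_u64() upstream */
	offset = nr * period;
	*period_left = val - offset;		/* back into [-period, 0] */

	return nr;
}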
4504
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004505static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004506 int nmi, struct perf_sample_data *data,
4507 struct pt_regs *regs)
4508{
4509 struct hw_perf_event *hwc = &event->hw;
4510 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004511
4512 data->period = event->hw.last_period;
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004513 if (!overflow)
4514 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004515
4516 if (hwc->interrupts == MAX_INTERRUPTS)
4517 return;
4518
4519 for (; overflow; overflow--) {
4520 if (__perf_event_overflow(event, nmi, throttle,
4521 data, regs)) {
4522 /*
4523 * We inhibit the overflow from happening when
4524 * hwc->interrupts == MAX_INTERRUPTS.
4525 */
4526 break;
4527 }
4528 throttle = 1;
4529 }
4530}
4531
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004532static void perf_swevent_event(struct perf_event *event, u64 nr,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004533 int nmi, struct perf_sample_data *data,
4534 struct pt_regs *regs)
4535{
4536 struct hw_perf_event *hwc = &event->hw;
4537
Peter Zijlstrae7850592010-05-21 14:43:08 +02004538 local64_add(nr, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004539
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004540 if (!regs)
4541 return;
4542
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01004543 if (!is_sampling_event(event))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004544 return;
4545
4546 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4547 return perf_swevent_overflow(event, 1, nmi, data, regs);
4548
Peter Zijlstrae7850592010-05-21 14:43:08 +02004549 if (local64_add_negative(nr, &hwc->period_left))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004550 return;
4551
4552 perf_swevent_overflow(event, 0, nmi, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004553}
4554
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004555static int perf_exclude_event(struct perf_event *event,
4556 struct pt_regs *regs)
4557{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004558 if (event->hw.state & PERF_HES_STOPPED)
4559 return 0;
4560
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004561 if (regs) {
4562 if (event->attr.exclude_user && user_mode(regs))
4563 return 1;
4564
4565 if (event->attr.exclude_kernel && !user_mode(regs))
4566 return 1;
4567 }
4568
4569 return 0;
4570}
4571
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004572static int perf_swevent_match(struct perf_event *event,
4573 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08004574 u32 event_id,
4575 struct perf_sample_data *data,
4576 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004577{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004578 if (event->attr.type != type)
4579 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004580
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004581 if (event->attr.config != event_id)
4582 return 0;
4583
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004584 if (perf_exclude_event(event, regs))
4585 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004586
4587 return 1;
4588}
4589
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004590static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004591{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004592 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004593
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004594 return hash_64(val, SWEVENT_HLIST_BITS);
4595}
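/*
 * Editor's note (not in the original file): the key packs the 32-bit
 * event id into the low word and the event type into the high word,
 * so e.g. PERF_TYPE_SOFTWARE (1) / PERF_COUNT_SW_PAGE_FAULTS (2)
 * hashes the key 0x0000000100000002ULL. hash_64() then folds that
 * into one of 2^SWEVENT_HLIST_BITS per-cpu buckets.
 */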
4596
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004597static inline struct hlist_head *
4598__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004599{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004600 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004601
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004602 return &hlist->heads[hash];
4603}
4604
4605/* For the read side: events when they trigger */
4606static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004607find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004608{
4609 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004610
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004611 hlist = rcu_dereference(swhash->swevent_hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004612 if (!hlist)
4613 return NULL;
4614
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004615 return __find_swevent_head(hlist, type, event_id);
4616}
4617
4618/* For the event head insertion and removal in the hlist */
4619static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004620find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004621{
4622 struct swevent_hlist *hlist;
4623 u32 event_id = event->attr.config;
4624 u64 type = event->attr.type;
4625
4626 /*
4627	 * Event scheduling is always serialized against hlist allocation
4628	 * and release, which makes the protected version suitable here.
4629	 * The context lock guarantees that.
4630 */
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004631 hlist = rcu_dereference_protected(swhash->swevent_hlist,
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004632 lockdep_is_held(&event->ctx->lock));
4633 if (!hlist)
4634 return NULL;
4635
4636 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004637}
4638
4639static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4640 u64 nr, int nmi,
4641 struct perf_sample_data *data,
4642 struct pt_regs *regs)
4643{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004644 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004645 struct perf_event *event;
4646 struct hlist_node *node;
4647 struct hlist_head *head;
4648
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004649 rcu_read_lock();
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004650 head = find_swevent_head_rcu(swhash, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004651 if (!head)
4652 goto end;
4653
4654 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08004655 if (perf_swevent_match(event, type, event_id, data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004656 perf_swevent_event(event, nr, nmi, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004657 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004658end:
4659 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004660}
4661
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004662int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004663{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004664 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004665
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004666 return get_recursion_context(swhash->recursion);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004667}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01004668EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004669
Jesper Juhlfa9f90b2010-11-28 21:39:34 +01004670inline void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004671{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004672 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004673
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004674 put_recursion_context(swhash->recursion, rctx);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004675}
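/*
 * Editor's note (not in the original file): the recursion[] slots, one
 * per PERF_NR_CONTEXTS, correspond to the task, softirq, hardirq and
 * NMI contexts; a software event firing from inside the handling of
 * another software event in the same context is dropped rather than
 * recursing.
 */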
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004676
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004677void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4678 struct pt_regs *regs, u64 addr)
4679{
Ingo Molnara4234bf2009-11-23 10:57:59 +01004680 struct perf_sample_data data;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004681 int rctx;
4682
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004683 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004684 rctx = perf_swevent_get_recursion_context();
4685 if (rctx < 0)
4686 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004687
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004688 perf_sample_data_init(&data, addr);
Ingo Molnara4234bf2009-11-23 10:57:59 +01004689
4690 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004691
4692 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004693 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004694}
4695
4696static void perf_swevent_read(struct perf_event *event)
4697{
4698}
4699
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004700static int perf_swevent_add(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004701{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004702 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004703 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004704 struct hlist_head *head;
4705
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01004706 if (is_sampling_event(event)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004707 hwc->last_period = hwc->sample_period;
4708 perf_swevent_set_period(event);
4709 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004710
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004711 hwc->state = !(flags & PERF_EF_START);
4712
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004713 head = find_swevent_head(swhash, event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004714 if (WARN_ON_ONCE(!head))
4715 return -EINVAL;
4716
4717 hlist_add_head_rcu(&event->hlist_entry, head);
4718
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004719 return 0;
4720}
4721
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004722static void perf_swevent_del(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004723{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004724 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004725}
4726
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004727static void perf_swevent_start(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004728{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004729 event->hw.state = 0;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004730}
4731
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004732static void perf_swevent_stop(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004733{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004734 event->hw.state = PERF_HES_STOPPED;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004735}
4736
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004737/* Deref the hlist from the update side */
4738static inline struct swevent_hlist *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004739swevent_hlist_deref(struct swevent_htable *swhash)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004740{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004741 return rcu_dereference_protected(swhash->swevent_hlist,
4742 lockdep_is_held(&swhash->hlist_mutex));
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004743}
4744
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004745static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4746{
4747 struct swevent_hlist *hlist;
4748
4749 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4750 kfree(hlist);
4751}
4752
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004753static void swevent_hlist_release(struct swevent_htable *swhash)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004754{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004755 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004756
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004757 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004758 return;
4759
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004760 rcu_assign_pointer(swhash->swevent_hlist, NULL);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004761 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4762}
4763
4764static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4765{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004766 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004767
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004768 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004769
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004770 if (!--swhash->hlist_refcount)
4771 swevent_hlist_release(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004772
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004773 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004774}
4775
4776static void swevent_hlist_put(struct perf_event *event)
4777{
4778 int cpu;
4779
4780 if (event->cpu != -1) {
4781 swevent_hlist_put_cpu(event, event->cpu);
4782 return;
4783 }
4784
4785 for_each_possible_cpu(cpu)
4786 swevent_hlist_put_cpu(event, cpu);
4787}
4788
4789static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4790{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004791 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004792 int err = 0;
4793
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004794 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004795
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004796 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004797 struct swevent_hlist *hlist;
4798
4799 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4800 if (!hlist) {
4801 err = -ENOMEM;
4802 goto exit;
4803 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004804 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004805 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004806 swhash->hlist_refcount++;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004807exit:
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004808 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004809
4810 return err;
4811}
4812
4813static int swevent_hlist_get(struct perf_event *event)
4814{
4815 int err;
4816 int cpu, failed_cpu;
4817
4818 if (event->cpu != -1)
4819 return swevent_hlist_get_cpu(event, event->cpu);
4820
4821 get_online_cpus();
4822 for_each_possible_cpu(cpu) {
4823 err = swevent_hlist_get_cpu(event, cpu);
4824 if (err) {
4825 failed_cpu = cpu;
4826 goto fail;
4827 }
4828 }
4829 put_online_cpus();
4830
4831 return 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004832fail:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004833 for_each_possible_cpu(cpu) {
4834 if (cpu == failed_cpu)
4835 break;
4836 swevent_hlist_put_cpu(event, cpu);
4837 }
4838
4839 put_online_cpus();
4840 return err;
4841}
4842
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004843atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004844
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004845static void sw_perf_event_destroy(struct perf_event *event)
4846{
4847 u64 event_id = event->attr.config;
4848
4849 WARN_ON(event->parent);
4850
Peter Zijlstra7e54a5a2010-10-14 22:32:45 +02004851 jump_label_dec(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004852 swevent_hlist_put(event);
4853}
4854
4855static int perf_swevent_init(struct perf_event *event)
4856{
4857 int event_id = event->attr.config;
4858
4859 if (event->attr.type != PERF_TYPE_SOFTWARE)
4860 return -ENOENT;
4861
4862 switch (event_id) {
4863 case PERF_COUNT_SW_CPU_CLOCK:
4864 case PERF_COUNT_SW_TASK_CLOCK:
4865 return -ENOENT;
4866
4867 default:
4868 break;
4869 }
4870
Dan Carpenterce677832010-10-24 21:50:42 +02004871 if (event_id >= PERF_COUNT_SW_MAX)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004872 return -ENOENT;
4873
4874 if (!event->parent) {
4875 int err;
4876
4877 err = swevent_hlist_get(event);
4878 if (err)
4879 return err;
4880
Peter Zijlstra7e54a5a2010-10-14 22:32:45 +02004881 jump_label_inc(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004882 event->destroy = sw_perf_event_destroy;
4883 }
4884
4885 return 0;
4886}
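/*
 * Editor's note (hedged, not part of the original file): the
 * jump_label_inc()/jump_label_dec() pair keeps a per-event-id count in
 * perf_swevent_enabled[]. While the count is zero, the inline
 * perf_sw_event() wrapper in perf_event.h skips __perf_sw_event()
 * entirely (via a patched jump label where the architecture supports
 * it), so unused software events cost almost nothing on hot paths.
 */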
4887
4888static struct pmu perf_swevent = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02004889 .task_ctx_nr = perf_sw_context,
4890
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004891 .event_init = perf_swevent_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004892 .add = perf_swevent_add,
4893 .del = perf_swevent_del,
4894 .start = perf_swevent_start,
4895 .stop = perf_swevent_stop,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004896 .read = perf_swevent_read,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004897};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004898
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004899#ifdef CONFIG_EVENT_TRACING
4900
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004901static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004902 struct perf_sample_data *data)
4903{
4904 void *record = data->raw->data;
4905
4906 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4907 return 1;
4908 return 0;
4909}
4910
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004911static int perf_tp_event_match(struct perf_event *event,
4912 struct perf_sample_data *data,
4913 struct pt_regs *regs)
4914{
Peter Zijlstra580d6072010-05-20 20:54:31 +02004915 /*
4916 * All tracepoints are from kernel-space.
4917 */
4918 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004919 return 0;
4920
4921 if (!perf_tp_filter_match(event, data))
4922 return 0;
4923
4924 return 1;
4925}
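/*
 * Editor's note (not in the original file): tracepoints always fire in
 * kernel context, so an event opened with attr.exclude_kernel set is
 * filtered out here unconditionally and will never count or sample.
 */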
4926
4927void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004928 struct pt_regs *regs, struct hlist_head *head, int rctx)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004929{
4930 struct perf_sample_data data;
4931 struct perf_event *event;
4932 struct hlist_node *node;
4933
4934 struct perf_raw_record raw = {
4935 .size = entry_size,
4936 .data = record,
4937 };
4938
4939 perf_sample_data_init(&data, addr);
4940 data.raw = &raw;
4941
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004942 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4943 if (perf_tp_event_match(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004944 perf_swevent_event(event, count, 1, &data, regs);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004945 }
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004946
4947 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004948}
4949EXPORT_SYMBOL_GPL(perf_tp_event);
4950
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004951static void tp_perf_event_destroy(struct perf_event *event)
4952{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004953 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004954}
4955
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004956static int perf_tp_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004957{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004958 int err;
4959
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004960 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4961 return -ENOENT;
4962
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004963 err = perf_trace_init(event);
4964 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004965 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004966
4967 event->destroy = tp_perf_event_destroy;
4968
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004969 return 0;
4970}
4971
4972static struct pmu perf_tracepoint = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02004973 .task_ctx_nr = perf_sw_context,
4974
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004975 .event_init = perf_tp_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004976 .add = perf_trace_add,
4977 .del = perf_trace_del,
4978 .start = perf_swevent_start,
4979 .stop = perf_swevent_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004980 .read = perf_swevent_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004981};
4982
4983static inline void perf_tp_register(void)
4984{
Peter Zijlstra2e80a822010-11-17 23:17:36 +01004985 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004986}
Li Zefan6fb29152009-10-15 11:21:42 +08004987
4988static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4989{
4990 char *filter_str;
4991 int ret;
4992
4993 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4994 return -EINVAL;
4995
4996 filter_str = strndup_user(arg, PAGE_SIZE);
4997 if (IS_ERR(filter_str))
4998 return PTR_ERR(filter_str);
4999
5000 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5001
5002 kfree(filter_str);
5003 return ret;
5004}
5005
5006static void perf_event_free_filter(struct perf_event *event)
5007{
5008 ftrace_profile_free_filter(event);
5009}
5010
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005011#else
Li Zefan6fb29152009-10-15 11:21:42 +08005012
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005013static inline void perf_tp_register(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005014{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005015}
Li Zefan6fb29152009-10-15 11:21:42 +08005016
5017static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5018{
5019 return -ENOENT;
5020}
5021
5022static void perf_event_free_filter(struct perf_event *event)
5023{
5024}
5025
Li Zefan07b139c2009-12-21 14:27:35 +08005026#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005027
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02005028#ifdef CONFIG_HAVE_HW_BREAKPOINT
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01005029void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02005030{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01005031 struct perf_sample_data sample;
5032 struct pt_regs *regs = data;
5033
Peter Zijlstradc1d6282010-03-03 15:55:04 +01005034 perf_sample_data_init(&sample, bp->attr.bp_addr);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01005035
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005036 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5037 perf_swevent_event(bp, 1, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02005038}
5039#endif
5040
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005041/*
5042 * hrtimer based swevent callback
5043 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005044
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005045static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005046{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005047 enum hrtimer_restart ret = HRTIMER_RESTART;
5048 struct perf_sample_data data;
5049 struct pt_regs *regs;
5050 struct perf_event *event;
5051 u64 period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005052
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005053 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5054 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005055
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005056 perf_sample_data_init(&data, 0);
5057 data.period = event->hw.last_period;
5058 regs = get_irq_regs();
5059
5060 if (regs && !perf_exclude_event(event, regs)) {
5061 if (!(event->attr.exclude_idle && current->pid == 0))
5062 if (perf_event_overflow(event, 0, &data, regs))
5063 ret = HRTIMER_NORESTART;
5064 }
5065
5066 period = max_t(u64, 10000, event->hw.sample_period);
5067 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5068
5069 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005070}
5071
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005072static void perf_swevent_start_hrtimer(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005073{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005074 struct hw_perf_event *hwc = &event->hw;
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01005075 s64 period;
5076
5077 if (!is_sampling_event(event))
5078 return;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005079
5080 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5081 hwc->hrtimer.function = perf_swevent_hrtimer;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005082
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01005083 period = local64_read(&hwc->period_left);
5084 if (period) {
5085 if (period < 0)
5086 period = 10000;
Peter Zijlstrafa407f32010-06-24 12:35:12 +02005087
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01005088 local64_set(&hwc->period_left, 0);
5089 } else {
5090 period = max_t(u64, 10000, hwc->sample_period);
5091 }
5092 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005093 ns_to_ktime(period), 0,
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02005094 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005095}
5096
5097static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5098{
5099 struct hw_perf_event *hwc = &event->hw;
5100
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01005101 if (is_sampling_event(event)) {
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005102 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
Peter Zijlstrafa407f32010-06-24 12:35:12 +02005103 local64_set(&hwc->period_left, ktime_to_ns(remaining));
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005104
5105 hrtimer_cancel(&hwc->hrtimer);
5106 }
5107}
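/*
 * Editor's note (not in the original file): cancel parks the time still
 * left on the timer in period_left, and the next
 * perf_swevent_start_hrtimer() re-arms with exactly that remainder, so
 * a sampling hrtimer event that is stopped and restarted does not
 * drift. The 10000ns floor above guards against re-arming with a
 * period so short the timer would fire back to back.
 */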
5108
5109/*
5110 * Software event: cpu wall time clock
5111 */
5112
5113static void cpu_clock_event_update(struct perf_event *event)
5114{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005115 s64 prev;
5116 u64 now;
5117
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005118 now = local_clock();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005119 prev = local64_xchg(&event->hw.prev_count, now);
5120 local64_add(now - prev, &event->count);
5121}
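/*
 * Editor's note (worked example, not in the original file):
 * local64_xchg() publishes the new timestamp and returns the old one in
 * a single atomic step, e.g.
 *
 *	prev_count: 1000 --xchg(1500)--> holds 1500, returns 1000
 *	count     += 1500 - 1000 = 500
 *
 * so even if ->read() and ->stop() race, each nanosecond of wall time
 * is credited to event->count exactly once.
 */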
5122
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005123static void cpu_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005124{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005125 local64_set(&event->hw.prev_count, local_clock());
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005126 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005127}
5128
5129static void cpu_clock_event_stop(struct perf_event *event, int flags)
5130{
5131 perf_swevent_cancel_hrtimer(event);
5132 cpu_clock_event_update(event);
5133}
5134
5135static int cpu_clock_event_add(struct perf_event *event, int flags)
5136{
5137 if (flags & PERF_EF_START)
5138 cpu_clock_event_start(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005139
5140 return 0;
5141}
5142
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005143static void cpu_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005144{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005145 cpu_clock_event_stop(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005146}
5147
5148static void cpu_clock_event_read(struct perf_event *event)
5149{
5150 cpu_clock_event_update(event);
5151}
5152
5153static int cpu_clock_event_init(struct perf_event *event)
5154{
5155 if (event->attr.type != PERF_TYPE_SOFTWARE)
5156 return -ENOENT;
5157
5158 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5159 return -ENOENT;
5160
5161 return 0;
5162}
5163
5164static struct pmu perf_cpu_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005165 .task_ctx_nr = perf_sw_context,
5166
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005167 .event_init = cpu_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005168 .add = cpu_clock_event_add,
5169 .del = cpu_clock_event_del,
5170 .start = cpu_clock_event_start,
5171 .stop = cpu_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005172 .read = cpu_clock_event_read,
5173};
5174
5175/*
5176 * Software event: task time clock
5177 */
5178
5179static void task_clock_event_update(struct perf_event *event, u64 now)
5180{
5181 u64 prev;
5182 s64 delta;
5183
5184 prev = local64_xchg(&event->hw.prev_count, now);
5185 delta = now - prev;
5186 local64_add(delta, &event->count);
5187}
5188
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005189static void task_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005190{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005191 local64_set(&event->hw.prev_count, event->ctx->time);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005192 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005193}
5194
5195static void task_clock_event_stop(struct perf_event *event, int flags)
5196{
5197 perf_swevent_cancel_hrtimer(event);
5198 task_clock_event_update(event, event->ctx->time);
5199}
5200
5201static int task_clock_event_add(struct perf_event *event, int flags)
5202{
5203 if (flags & PERF_EF_START)
5204 task_clock_event_start(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005205
5206 return 0;
5207}
5208
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005209static void task_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005210{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005211 task_clock_event_stop(event, PERF_EF_UPDATE);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005212}
5213
5214static void task_clock_event_read(struct perf_event *event)
5215{
5216 u64 time;
5217
5218 if (!in_nmi()) {
5219 update_context_time(event->ctx);
5220 time = event->ctx->time;
5221 } else {
5222 u64 now = perf_clock();
5223 u64 delta = now - event->ctx->timestamp;
5224 time = event->ctx->time + delta;
5225 }
5226
5227 task_clock_event_update(event, time);
5228}
5229
5230static int task_clock_event_init(struct perf_event *event)
5231{
5232 if (event->attr.type != PERF_TYPE_SOFTWARE)
5233 return -ENOENT;
5234
5235 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5236 return -ENOENT;
5237
5238 return 0;
5239}
5240
5241static struct pmu perf_task_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005242 .task_ctx_nr = perf_sw_context,
5243
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005244 .event_init = task_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005245 .add = task_clock_event_add,
5246 .del = task_clock_event_del,
5247 .start = task_clock_event_start,
5248 .stop = task_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005249 .read = task_clock_event_read,
5250};
5251
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005252static void perf_pmu_nop_void(struct pmu *pmu)
5253{
5254}
5255
5256static int perf_pmu_nop_int(struct pmu *pmu)
5257{
5258 return 0;
5259}
5260
5261static void perf_pmu_start_txn(struct pmu *pmu)
5262{
5263 perf_pmu_disable(pmu);
5264}
5265
5266static int perf_pmu_commit_txn(struct pmu *pmu)
5267{
5268 perf_pmu_enable(pmu);
5269 return 0;
5270}
5271
5272static void perf_pmu_cancel_txn(struct pmu *pmu)
5273{
5274 perf_pmu_enable(pmu);
5275}
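/*
 * Editor's note (rough sketch, not part of the original file): group
 * scheduling uses this transaction interface to batch PMU writes,
 * along the lines of:
 *
 *	pmu->start_txn(pmu);		   (stub: perf_pmu_disable())
 *	for each event in the group:
 *		if (event->pmu->add(event, PERF_EF_START))
 *			goto fail;
 *	if (!pmu->commit_txn(pmu))	   (stub: re-enable, report 0)
 *		return;			   -> whole group went on
 * fail:
 *	pmu->cancel_txn(pmu);		   (stub: just re-enable)
 *
 * The stubs above give PMUs without real hardware transactions the
 * same calling convention.
 */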
5276
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005277/*
5278 * Ensures all contexts with the same task_ctx_nr have the same
5279 * pmu_cpu_context too.
5280 */
5281static void *find_pmu_context(int ctxn)
5282{
5283 struct pmu *pmu;
5284
5285 if (ctxn < 0)
5286 return NULL;
5287
5288 list_for_each_entry(pmu, &pmus, entry) {
5289 if (pmu->task_ctx_nr == ctxn)
5290 return pmu->pmu_cpu_context;
5291 }
5292
5293 return NULL;
5294}
5295
Peter Zijlstra51676952010-12-07 14:18:20 +01005296static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005297{
Peter Zijlstra51676952010-12-07 14:18:20 +01005298 int cpu;
5299
5300 for_each_possible_cpu(cpu) {
5301 struct perf_cpu_context *cpuctx;
5302
5303 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5304
5305 if (cpuctx->active_pmu == old_pmu)
5306 cpuctx->active_pmu = pmu;
5307 }
5308}
5309
5310static void free_pmu_context(struct pmu *pmu)
5311{
5312 struct pmu *i;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005313
5314 mutex_lock(&pmus_lock);
5315 /*
5316 * Like a real lame refcount.
5317 */
Peter Zijlstra51676952010-12-07 14:18:20 +01005318 list_for_each_entry(i, &pmus, entry) {
5319 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5320 update_pmu_context(i, pmu);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005321 goto out;
Peter Zijlstra51676952010-12-07 14:18:20 +01005322 }
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005323 }
5324
Peter Zijlstra51676952010-12-07 14:18:20 +01005325 free_percpu(pmu->pmu_cpu_context);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005326out:
5327 mutex_unlock(&pmus_lock);
5328}
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005329static struct idr pmu_idr;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005330
Peter Zijlstraabe43402010-11-17 23:17:37 +01005331static ssize_t
5332type_show(struct device *dev, struct device_attribute *attr, char *page)
5333{
5334 struct pmu *pmu = dev_get_drvdata(dev);
5335
5336 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5337}
5338
5339static struct device_attribute pmu_dev_attrs[] = {
5340 __ATTR_RO(type),
5341 __ATTR_NULL,
5342};
5343
5344static int pmu_bus_running;
5345static struct bus_type pmu_bus = {
5346 .name = "event_source",
5347 .dev_attrs = pmu_dev_attrs,
5348};
5349
5350static void pmu_dev_release(struct device *dev)
5351{
5352 kfree(dev);
5353}
5354
5355static int pmu_dev_alloc(struct pmu *pmu)
5356{
5357 int ret = -ENOMEM;
5358
5359 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5360 if (!pmu->dev)
5361 goto out;
5362
5363 device_initialize(pmu->dev);
5364 ret = dev_set_name(pmu->dev, "%s", pmu->name);
5365 if (ret)
5366 goto free_dev;
5367
5368 dev_set_drvdata(pmu->dev, pmu);
5369 pmu->dev->bus = &pmu_bus;
5370 pmu->dev->release = pmu_dev_release;
5371 ret = device_add(pmu->dev);
5372 if (ret)
5373 goto free_dev;
5374
5375out:
5376 return ret;
5377
5378free_dev:
5379 put_device(pmu->dev);
5380 goto out;
5381}
5382
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01005383static struct lock_class_key cpuctx_mutex;
5384
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005385int perf_pmu_register(struct pmu *pmu, char *name, int type)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005386{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005387 int cpu, ret;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005388
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005389 mutex_lock(&pmus_lock);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005390 ret = -ENOMEM;
5391 pmu->pmu_disable_count = alloc_percpu(int);
5392 if (!pmu->pmu_disable_count)
5393 goto unlock;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005394
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005395 pmu->type = -1;
5396 if (!name)
5397 goto skip_type;
5398 pmu->name = name;
5399
5400 if (type < 0) {
5401 int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5402 if (!err)
5403 goto free_pdc;
5404
5405 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5406 if (err) {
5407 ret = err;
5408 goto free_pdc;
5409 }
5410 }
5411 pmu->type = type;
5412
Peter Zijlstraabe43402010-11-17 23:17:37 +01005413 if (pmu_bus_running) {
5414 ret = pmu_dev_alloc(pmu);
5415 if (ret)
5416 goto free_idr;
5417 }
5418
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005419skip_type:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005420 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5421 if (pmu->pmu_cpu_context)
5422 goto got_cpu_context;
5423
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005424 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5425 if (!pmu->pmu_cpu_context)
Peter Zijlstraabe43402010-11-17 23:17:37 +01005426 goto free_dev;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005427
5428 for_each_possible_cpu(cpu) {
5429 struct perf_cpu_context *cpuctx;
5430
5431 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Peter Zijlstraeb184472010-09-07 15:55:13 +02005432 __perf_event_init_context(&cpuctx->ctx);
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01005433 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005434 cpuctx->ctx.type = cpu_context;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005435 cpuctx->ctx.pmu = pmu;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02005436 cpuctx->jiffies_interval = 1;
5437 INIT_LIST_HEAD(&cpuctx->rotation_list);
Peter Zijlstra51676952010-12-07 14:18:20 +01005438 cpuctx->active_pmu = pmu;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005439 }
5440
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005441got_cpu_context:
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005442 if (!pmu->start_txn) {
5443 if (pmu->pmu_enable) {
5444 /*
5445 * If we have pmu_enable/pmu_disable calls, install
5446 * transaction stubs that use that to try and batch
5447 * hardware accesses.
5448 */
5449 pmu->start_txn = perf_pmu_start_txn;
5450 pmu->commit_txn = perf_pmu_commit_txn;
5451 pmu->cancel_txn = perf_pmu_cancel_txn;
5452 } else {
5453 pmu->start_txn = perf_pmu_nop_void;
5454 pmu->commit_txn = perf_pmu_nop_int;
5455 pmu->cancel_txn = perf_pmu_nop_void;
5456 }
5457 }
5458
5459 if (!pmu->pmu_enable) {
5460 pmu->pmu_enable = perf_pmu_nop_void;
5461 pmu->pmu_disable = perf_pmu_nop_void;
5462 }
5463
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005464 list_add_rcu(&pmu->entry, &pmus);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005465 ret = 0;
5466unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005467 mutex_unlock(&pmus_lock);
5468
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005469 return ret;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005470
Peter Zijlstraabe43402010-11-17 23:17:37 +01005471free_dev:
5472 device_del(pmu->dev);
5473 put_device(pmu->dev);
5474
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005475free_idr:
5476 if (pmu->type >= PERF_TYPE_MAX)
5477 idr_remove(&pmu_idr, pmu->type);
5478
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005479free_pdc:
5480 free_percpu(pmu->pmu_disable_count);
5481 goto unlock;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005482}
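/*
 * Editor's note: a minimal registration sketch (not in the original
 * file; "snoop" and its callbacks are invented for illustration). A
 * software-style PMU only has to supply event_init plus the
 * add/del/start/stop/read ops; the txn/pmu_enable hooks are filled in
 * with the nop stubs above:
 *
 *	static struct pmu snoop_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= snoop_event_init,  (-ENOENT if not ours)
 *		.add		= snoop_add,
 *		.del		= snoop_del,
 *		.start		= snoop_start,
 *		.stop		= snoop_stop,
 *		.read		= snoop_read,
 *	};
 *
 *	perf_pmu_register(&snoop_pmu, "snoop", -1);  (-1: allocate a type)
 */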
5483
5484void perf_pmu_unregister(struct pmu *pmu)
5485{
5486 mutex_lock(&pmus_lock);
5487 list_del_rcu(&pmu->entry);
5488 mutex_unlock(&pmus_lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005489
5490 /*
Peter Zijlstracde8e882010-09-13 11:06:55 +02005491 * We dereference the pmu list under both SRCU and regular RCU, so
5492 * synchronize against both of those.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005493 */
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005494 synchronize_srcu(&pmus_srcu);
Peter Zijlstracde8e882010-09-13 11:06:55 +02005495 synchronize_rcu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005496
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005497 free_percpu(pmu->pmu_disable_count);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005498 if (pmu->type >= PERF_TYPE_MAX)
5499 idr_remove(&pmu_idr, pmu->type);
Peter Zijlstraabe43402010-11-17 23:17:37 +01005500 device_del(pmu->dev);
5501 put_device(pmu->dev);
Peter Zijlstra51676952010-12-07 14:18:20 +01005502 free_pmu_context(pmu);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005503}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005504
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005505struct pmu *perf_init_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005506{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005507 struct pmu *pmu = NULL;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005508 int idx;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005509
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005510 idx = srcu_read_lock(&pmus_srcu);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005511
5512 rcu_read_lock();
5513 pmu = idr_find(&pmu_idr, event->attr.type);
5514 rcu_read_unlock();
5515 if (pmu)
5516 goto unlock;
5517
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005518 list_for_each_entry_rcu(pmu, &pmus, entry) {
5519 int ret = pmu->event_init(event);
5520 if (!ret)
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005521 goto unlock;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005522
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005523 if (ret != -ENOENT) {
5524 pmu = ERR_PTR(ret);
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005525 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005526 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005527 }
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005528 pmu = ERR_PTR(-ENOENT);
5529unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005530 srcu_read_unlock(&pmus_srcu, idx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005531
5532 return pmu;
5533}
5534
5535/*
5536 * Allocate and initialize an event structure
5537 */
5538static struct perf_event *
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005539perf_event_alloc(struct perf_event_attr *attr, int cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005540 struct task_struct *task,
5541 struct perf_event *group_leader,
5542 struct perf_event *parent_event,
5543 perf_overflow_handler_t overflow_handler)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005544{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005545 struct pmu *pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005546 struct perf_event *event;
5547 struct hw_perf_event *hwc;
5548 long err;
5549
Oleg Nesterov66832eb2011-01-18 17:10:32 +01005550 if ((unsigned)cpu >= nr_cpu_ids) {
5551 if (!task || cpu != -1)
5552 return ERR_PTR(-EINVAL);
5553 }
5554
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005555 event = kzalloc(sizeof(*event), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005556 if (!event)
5557 return ERR_PTR(-ENOMEM);
5558
5559 /*
5560 * Single events are their own group leaders, with an
5561 * empty sibling list:
5562 */
5563 if (!group_leader)
5564 group_leader = event;
5565
5566 mutex_init(&event->child_mutex);
5567 INIT_LIST_HEAD(&event->child_list);
5568
5569 INIT_LIST_HEAD(&event->group_entry);
5570 INIT_LIST_HEAD(&event->event_entry);
5571 INIT_LIST_HEAD(&event->sibling_list);
5572 init_waitqueue_head(&event->waitq);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08005573 init_irq_work(&event->pending, perf_pending_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005574
5575 mutex_init(&event->mmap_mutex);
5576
5577 event->cpu = cpu;
5578 event->attr = *attr;
5579 event->group_leader = group_leader;
5580 event->pmu = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005581 event->oncpu = -1;
5582
5583 event->parent = parent_event;
5584
5585 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5586 event->id = atomic64_inc_return(&perf_event_id);
5587
5588 event->state = PERF_EVENT_STATE_INACTIVE;
5589
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005590 if (task) {
5591 event->attach_state = PERF_ATTACH_TASK;
5592#ifdef CONFIG_HAVE_HW_BREAKPOINT
5593 /*
5594 * hw_breakpoint is a bit difficult here..
5595 */
5596 if (attr->type == PERF_TYPE_BREAKPOINT)
5597 event->hw.bp_target = task;
5598#endif
5599 }
5600
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005601 if (!overflow_handler && parent_event)
5602 overflow_handler = parent_event->overflow_handler;
Oleg Nesterov66832eb2011-01-18 17:10:32 +01005603
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005604 event->overflow_handler = overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005605
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005606 if (attr->disabled)
5607 event->state = PERF_EVENT_STATE_OFF;
5608
5609 pmu = NULL;
5610
5611 hwc = &event->hw;
5612 hwc->sample_period = attr->sample_period;
5613 if (attr->freq && attr->sample_freq)
5614 hwc->sample_period = 1;
5615 hwc->last_period = hwc->sample_period;
5616
Peter Zijlstrae7850592010-05-21 14:43:08 +02005617 local64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005618
5619 /*
5620 * we currently do not support PERF_FORMAT_GROUP on inherited events
5621 */
5622 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5623 goto done;
5624
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005625 pmu = perf_init_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005626
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005627done:
5628 err = 0;
5629 if (!pmu)
5630 err = -EINVAL;
5631 else if (IS_ERR(pmu))
5632 err = PTR_ERR(pmu);
5633
5634 if (err) {
5635 if (event->ns)
5636 put_pid_ns(event->ns);
5637 kfree(event);
5638 return ERR_PTR(err);
5639 }
5640
5641 event->pmu = pmu;
5642
5643 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02005644 if (event->attach_state & PERF_ATTACH_TASK)
5645 jump_label_inc(&perf_task_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01005646 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005647 atomic_inc(&nr_mmap_events);
5648 if (event->attr.comm)
5649 atomic_inc(&nr_comm_events);
5650 if (event->attr.task)
5651 atomic_inc(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02005652 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5653 err = get_callchain_buffers();
5654 if (err) {
5655 free_event(event);
5656 return ERR_PTR(err);
5657 }
5658 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005659 }
5660
5661 return event;
5662}
5663
5664static int perf_copy_attr(struct perf_event_attr __user *uattr,
5665 struct perf_event_attr *attr)
5666{
5667 u32 size;
5668 int ret;
5669
5670 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5671 return -EFAULT;
5672
5673 /*
5674	 * zero the full structure, so that a short copy leaves the tail zeroed.
5675 */
5676 memset(attr, 0, sizeof(*attr));
5677
5678 ret = get_user(size, &uattr->size);
5679 if (ret)
5680 return ret;
5681
5682 if (size > PAGE_SIZE) /* silly large */
5683 goto err_size;
5684
5685 if (!size) /* abi compat */
5686 size = PERF_ATTR_SIZE_VER0;
5687
5688 if (size < PERF_ATTR_SIZE_VER0)
5689 goto err_size;
5690
5691 /*
5692 * If we're handed a bigger struct than we know of,
5693 * ensure all the unknown bits are 0 - i.e. new
5694 * user-space does not rely on any kernel feature
5695	 * extensions we don't know about yet.
5696 */
5697 if (size > sizeof(*attr)) {
5698 unsigned char __user *addr;
5699 unsigned char __user *end;
5700 unsigned char val;
5701
5702 addr = (void __user *)uattr + sizeof(*attr);
5703 end = (void __user *)uattr + size;
5704
5705 for (; addr < end; addr++) {
5706 ret = get_user(val, addr);
5707 if (ret)
5708 return ret;
5709 if (val)
5710 goto err_size;
5711 }
5712 size = sizeof(*attr);
5713 }
5714
5715 ret = copy_from_user(attr, uattr, size);
5716 if (ret)
5717 return -EFAULT;
5718
5719 /*
5720	 * If the type exists, the corresponding event_init() will
5721	 * verify attr->config.
5722 */
5723 if (attr->type >= PERF_TYPE_MAX)
5724 return -EINVAL;
5725
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05305726 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005727 return -EINVAL;
5728
5729 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5730 return -EINVAL;
5731
5732 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5733 return -EINVAL;
5734
5735out:
5736 return ret;
5737
5738err_size:
5739 put_user(sizeof(*attr), &uattr->size);
5740 ret = -E2BIG;
5741 goto out;
5742}
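/*
 * Editor's note (user-space sketch, not part of the original file): the
 * size-based copy above is what keeps perf_event_attr forward and
 * backward compatible. A well-behaved caller always sets .size:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);	(declare our ABI revision)
 *	attr.type = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_TASK_CLOCK;
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * An older binary passing a smaller struct gets the zeroed tail; a
 * newer one passing a bigger struct is accepted only if every byte the
 * kernel does not understand is zero (the get_user() loop above).
 */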
5743
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005744static int
5745perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005746{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005747 struct perf_buffer *buffer = NULL, *old_buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005748 int ret = -EINVAL;
5749
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005750 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005751 goto set;
5752
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005753 /* don't allow circular references */
5754 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005755 goto out;
5756
Peter Zijlstra0f139302010-05-20 14:35:15 +02005757 /*
5758 * Don't allow cross-cpu buffers
5759 */
5760 if (output_event->cpu != event->cpu)
5761 goto out;
5762
5763 /*
5764 * If its not a per-cpu buffer, it must be the same task.
5765	 * If it's not a per-cpu buffer, it must be the same task.
5766 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5767 goto out;
5768
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005769set:
5770 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005771 /* Can't redirect output if we've got an active mmap() */
5772 if (atomic_read(&event->mmap_count))
5773 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005774
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005775 if (output_event) {
5776 /* get the buffer we want to redirect to */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005777 buffer = perf_buffer_get(output_event);
5778 if (!buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005779 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005780 }
5781
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005782 old_buffer = event->buffer;
5783 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005784 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005785unlock:
5786 mutex_unlock(&event->mmap_mutex);
5787
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005788 if (old_buffer)
5789 perf_buffer_put(old_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005790out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005791 return ret;
5792}
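/*
 * Editor's note (summary, not in the original file): redirection is
 * only allowed where the two events could legitimately share a buffer:
 * not to the event itself, only to an event on the same CPU, and, for
 * per-task (cpu == -1) targets, only within the same context. It is
 * also refused while the event being redirected still has an active
 * mmap() of its own buffer.
 */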
5793
5794/**
5795 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5796 *
5797 * @attr_uptr: event_id type attributes for monitoring/sampling
5798 * @pid: target pid
5799 * @cpu: target cpu
5800 * @group_fd: group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_fd;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both software
			 * events, and event is, then the group leader must
			 * be a hardware event.
			 *
			 * Allow the addition of software events to !software
			 * groups; this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_event_remove_from_context(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_event_remove_from_context(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fput_light(group_file, fput_needed);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
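
/*
 * Illustrative sketch, not part of the original file: user space reaches
 * the syscall above through syscall(2), since glibc provides no wrapper.
 * The helper name is hypothetical; the attr fields and the pid/cpu/group_fd
 * conventions are the ones handled above (pid > 0 with cpu == -1 counts
 * that task on any CPU, group_fd == -1 makes the event its own leader).
 *
 *	#include <linux/perf_event.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int open_cycles_counter(pid_t pid)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = PERF_TYPE_HARDWARE;
 *		attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 *		return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
 *	}
 *
 * The returned fd supports read(2) for the count and can be passed back
 * as group_fd to attach sibling events to the same group.
 */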

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
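 * @overflow_handler: callback invoked when the event overflows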
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
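
/*
 * Illustrative sketch, not part of the original file: a minimal in-kernel
 * user of the interface exported above. The names sketch_overflow and
 * sketch_cycles_on are hypothetical, and the overflow handler runs from
 * event overflow context (possibly NMI), so it must stay short. cpu >= 0
 * with task == NULL binds the counter to one CPU, as handled above.
 *
 *	static void sketch_overflow(struct perf_event *event, int nmi,
 *				    struct perf_sample_data *data,
 *				    struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct perf_event *sketch_cycles_on(int cpu)
 *	{
 *		struct perf_event_attr attr = {
 *			.type		= PERF_TYPE_HARDWARE,
 *			.config		= PERF_COUNT_HW_CPU_CYCLES,
 *			.size		= sizeof(attr),
 *			.sample_period	= 1000000,
 *		};
 *
 *		return perf_event_create_kernel_counter(&attr, cpu, NULL,
 *							sketch_overflow);
 *	}
 *
 * The return value must be checked with IS_ERR(); teardown goes through
 * perf_event_release_kernel().
 */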

static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
			 struct task_struct *child)
{
	struct perf_event *parent_event;

	perf_event_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped, but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
	task_ctx_sched_out(child_ctx, EVENT_ALL);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner; this closes a race against perf_release(),
		 * where we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which is guaranteed to have a filp that we can use as
	 * the reference count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
					   NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
}
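
/*
 * Illustrative sketch, not part of the original file: how a PMU driver
 * plugs into the core initialized above. sketch_pmu and its callbacks are
 * hypothetical; the registration call mirrors the built-in PMUs above,
 * with type -1 requesting a dynamic id from pmu_idr:
 *
 *	static struct pmu sketch_pmu = {
 *		.task_ctx_nr	= perf_hw_context,
 *		.event_init	= sketch_event_init,
 *		.add		= sketch_add,
 *		.del		= sketch_del,
 *		.start		= sketch_start,
 *		.stop		= sketch_stop,
 *		.read		= sketch_read,
 *	};
 *
 *	perf_pmu_register(&sketch_pmu, "sketch", -1);
 *
 * Once perf_event_sysfs_init() below has run, a named PMU also gets a
 * device on the pmu bus via pmu_dev_alloc().
 */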

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);