/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>

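/*
 * Scheduling policy flags: pinned groups must stay on the context whenever
 * it is active, flexible groups may be rotated out to multiplex the
 * hardware. EVENT_ALL covers both when (un)scheduling a whole context.
 */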
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

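/*
 * Registered PMUs live on the 'pmus' list; modifications are serialized
 * by pmus_lock, while pmus_srcu is meant to let readers traverse the
 * list without taking the mutex.
 */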
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

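/*
 * perf_pmu_disable() and perf_pmu_enable() nest: the PMU is really
 * disabled on the 0->1 transition of the per-cpu count and re-enabled
 * only when the matching enable brings the count back to zero.
 */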
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

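/*
 * Context life time: get_ctx()/put_ctx() pin a context; the final put
 * defers freeing to an RCU grace period so that lockless readers (see
 * perf_lock_task_context()) can still safely dereference it.
 */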
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

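/*
 * Report pid/tid in the pid namespace the (top level) event was created
 * in, not in the namespace of whoever happens to read the event.
 */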
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

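/*
 * An event's notion of time is the time of its context; events without
 * a context report 0.
 */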
static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = perf_event_time(event);
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

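/*
 * Pinned events go on ctx->pinned_groups, everything else on
 * ctx->flexible_groups; only the latter are eligible for rotation.
 */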
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the list for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
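/*
 * For example, with PERF_FORMAT_GROUP | PERF_FORMAT_ID on a leader with
 * one sibling, read() returns { nr, value0, id0, value1, id1 }, i.e.
 * 5 * sizeof(u64) == 40 bytes, which is what the computation below
 * arrives at: entry = 16, nr = 2, plus 8 for the leading nr field.
 */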
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If the event was in error state, keep it that way;
	 * otherwise bogus counts will be returned on read(). The
	 * only way out of error state is an explicit re-enable of
	 * the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

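/*
 * An event only counts on the right CPU: event->cpu == -1 means it
 * follows its task anywhere, otherwise it must match the current CPU.
 */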
static inline int
event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * a filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

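/*
 * Map a context to this CPU's instance of its PMU's per-cpu context.
 */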
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	raw_spin_unlock(&ctx->lock);
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so
	 * we can remove the event safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	event->shadow_ctx_time = tstamp - ctx->timestamp;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	add_event_to_ctx(event, ctx);

	if (!event_filter_match(event))
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents the context from being scheduled in, so
	 * we can add the event safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (!event_filter_match(event))
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	raw_spin_unlock_irq(&ctx->lock);
}

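/*
 * Arm an event for 'refresh' additional overflows before it disables
 * itself again; only meaningful for sampling events that are not
 * inherited (anything else gets -EINVAL below).
 */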
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	if (!ctx->nr_active)
		goto out;

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
out:
	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

void perf_event_context_sched_out(struct task_struct *task, int ctxn,
				  struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}

#define for_each_task_context_nr(ctxn) \
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);
}

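/*
 * Unschedule the given task context on this CPU, but only if it is
 * actually the one that is currently scheduled there.
 */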
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001421static void task_ctx_sched_out(struct perf_event_context *ctx,
1422 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001423{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001424 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001425
1426 if (!cpuctx->task_ctx)
1427 return;
1428
1429 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1430 return;
1431
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001432 ctx_sched_out(ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001433 cpuctx->task_ctx = NULL;
1434}
1435
1436/*
1437 * Called with IRQs disabled
1438 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001439static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1440 enum event_type_t event_type)
1441{
1442 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001443}

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
{
	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

out:
	raw_spin_unlock(&ctx->lock);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);
}

static void task_ctx_sched_in(struct perf_event_context *ctx,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;
}

void perf_event_context_sched_in(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
	perf_pmu_enable(ctx->pmu);
}

/*
 * Called from the scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of the event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx);
	}
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */
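
	/*
	 * A worked example with made-up numbers: counting
	 * @count = 100000 events over @nsec = 10^7 ns against
	 * sample_freq = 1000 gives
	 *
	 *	100000 * 10^9 / (10^7 * 1000) = 10^4,
	 *
	 * i.e. a period of 10000 events between samples.
	 */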

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)
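
	/*
	 * E.g. (made-up values): with count_fls = 40 and sec_fls = 30,
	 * one REDUCE_FLS(count, sec) halves @count and drops count_fls
	 * to 39; repeated applications shrink the larger term until the
	 * product of the pair fits in 64 bits.
	 */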

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */
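	/*
	 * Illustration (numbers invented): with hwc->sample_period = 10000
	 * and a computed target period of 18000, delta = 8000 and the
	 * filtered delta is (8000 + 7) / 8 = 1000, moving the period to
	 * 11000; repeated ticks close the remaining gap geometrically
	 * rather than jumping to the target at once.
	 */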

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
	}
}

static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, period, delta);
	}
	raw_spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/*
	 * Rotate the first entry of the non-pinned groups to the tail.
	 * Rotation might be disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}
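
/*
 * To illustrate: with flexible groups [A, B, C] on the list,
 * list_rotate_left() yields [B, C, A], so a group that lost out on
 * hardware counters this tick moves toward the front for the next one.
 */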

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	perf_pmu_disable(cpuctx->ctx.pmu);
	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
	if (ctx)
		perf_ctx_adjust_freq(ctx, interval);

	if (!rotate)
		goto done;

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);

done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);

	perf_pmu_enable(cpuctx->ctx.pmu);
}

void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;

	WARN_ON(!irqs_disabled());

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event, ctx);

	return 1;
}

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	task_ctx_sched_out(ctx, EVENT_ALL);

	raw_spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	perf_event_context_sched_in(ctx);
out:
	local_irq_restore(flags);
}

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	raw_spin_unlock(&ctx->lock);

	event->pmu->read(event);
}

static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If the event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * We may read while the context is not active (e.g. the
		 * thread is blocked); in that case we cannot update the
		 * context time.
		 */
		if (ctx->is_active)
			update_context_time(ctx);
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}

/*
 * Callchain support
 */

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
struct callchain_cpus_entries *callchain_cpus_entries;

__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

static int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
	if (err)
		release_callchain_buffers();
exit:
	mutex_unlock(&callchain_mutex);

	return err;
}

static void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

static int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
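
/*
 * That is, there is one recursion slot per context level: task (0),
 * softirq (1), hardirq (2) and NMI (3). An NMI interrupting an
 * IRQ-context callchain capture gets its own slot, while a second
 * capture attempt from the same level finds recursion[rctx] set and
 * backs off with -1.
 */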

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}

static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}

static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);
}

static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		get_ctx(ctx);

		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If the task has already passed perf_event_exit_task(),
		 * we must see PF_EXITING; it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
			put_task_struct(task);
			kfree(ctx);

			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void perf_buffer_put(struct perf_buffer *buffer);

static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_dec(&perf_task_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
	}

	if (event->buffer) {
		perf_buffer_put(event->buffer);
		event->buffer = NULL;
	}

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}

int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	/*
	 * Remove from the PMU, can't get re-enabled since we got
	 * here because the last ref went.
	 */
	perf_event_disable(event);

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task;
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex; however, this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct task_struct *owner;

	file->private_data = NULL;

	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}

	return perf_event_release_kernel(event);
}

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
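
/*
 * A minimal in-kernel usage sketch (illustrative; assumes @event came
 * from something like perf_event_create_kernel_counter()):
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 * If the event was time-multiplexed, running < enabled, and the caller
 * may scale the count as count * enabled / running.
 */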

static int perf_event_read_group(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}

static int perf_event_read_one(struct perf_event *event,
			       u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
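
/*
 * For example, from the reader's side (illustrative): with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID, a read() on the event fd returns four u64 values laid
 * out as { value, time_enabled, time_running, id }.
 */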

/*
 * Read the performance event - simple non-blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < event->read_size)
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct perf_buffer *buffer;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer)
		events = atomic_xchg(&buffer->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &event->waitq, wait);

	return events;
}

static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}

/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event() if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
				      void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}

static void perf_event_for_each(struct perf_event *event,
				void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	func(event);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}

static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!is_sampling_event(event))
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}

static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}

int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif

static int perf_event_index(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	userpg = buffer->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= local64_read(&event->hw.prev_count);

	userpg->time_enabled = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
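
/*
 * The ->lock increments above form a seqcount, odd while an update is in
 * flight. A sketch of the matching user-space read side (illustrative;
 * rdpmc() stands in for whatever self-monitoring counter-read primitive
 * the architecture offers):
 *
 *	u32 seq;
 *	u64 count;
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */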

static unsigned long perf_data_size(struct perf_buffer *buffer);

static void
perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
{
	long max_size = perf_data_size(buffer);

	if (watermark)
		buffer->watermark = min(max_size, watermark);

	if (!buffer->watermark)
		buffer->watermark = max_size / 2;

	if (flags & PERF_BUFFER_WRITABLE)
		buffer->writable = 1;

	atomic_set(&buffer->refcount, 1);
}
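
/*
 * For instance (illustrative numbers): a buffer with 16 data pages on a
 * 4KiB-page machine has max_size = 64KiB, so absent an explicit
 * watermark, consumers are woken once roughly 32KiB of data is pending.
 */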

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > buffer->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(buffer->user_page);

	return virt_to_page(buffer->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	int i;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	buffer->user_page = perf_mmap_alloc_page(cpu);
	if (!buffer->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!buffer->data_pages[i])
			goto fail_data_pages;
	}

	buffer->nr_pages = nr_pages;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)buffer->data_pages[i]);

	free_page((unsigned long)buffer->user_page);

fail_user_page:
	kfree(buffer);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	int i;

	perf_mmap_free_page((unsigned long)buffer->user_page);
	for (i = 0; i < buffer->nr_pages; i++)
		perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
	kfree(buffer);
}

static inline int page_order(struct perf_buffer *buffer)
{
	return 0;
}

#else

/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *buffer)
{
	return buffer->page_order;
}

static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(buffer)))
		return NULL;

	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void perf_buffer_free_work(struct work_struct *work)
{
	struct perf_buffer *buffer;
	void *base;
	int i, nr;

	buffer = container_of(work, struct perf_buffer, work);
	nr = 1 << page_order(buffer);

	base = buffer->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(buffer);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	schedule_work(&buffer->work);
}

static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	INIT_WORK(&buffer->work, perf_buffer_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	buffer->user_page = all_buf;
	buffer->data_pages[0] = all_buf + PAGE_SIZE;
	buffer->page_order = ilog2(nr_pages);
	buffer->nr_pages = 1;

Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003001 perf_buffer_init(buffer, watermark, flags);
3002
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003003 return buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003004
3005fail_all_buf:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003006 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003007
3008fail:
3009 return NULL;
3010}
3011
3012#endif
3013
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003014static unsigned long perf_data_size(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003015{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003016 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003017}
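
/*
 * Illustrative sizing example (editorial, not from the source): for
 * 8 data pages with a 4KiB PAGE_SIZE, the page-backed variant has
 * nr_pages == 8 and page_order() == 0, while the vmalloc-backed
 * variant has nr_pages == 1 and page_order() == ilog2(8) == 3;
 * either way perf_data_size() yields 8 << 12 == 32KiB of data space.
 */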

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct perf_event *event = vma->vm_file->private_data;
        struct perf_buffer *buffer;
        int ret = VM_FAULT_SIGBUS;

        if (vmf->flags & FAULT_FLAG_MKWRITE) {
                if (vmf->pgoff == 0)
                        ret = 0;
                return ret;
        }

        rcu_read_lock();
        buffer = rcu_dereference(event->buffer);
        if (!buffer)
                goto unlock;

        if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
                goto unlock;

        vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
        if (!vmf->page)
                goto unlock;

        get_page(vmf->page);
        vmf->page->mapping = vma->vm_file->f_mapping;
        vmf->page->index   = vmf->pgoff;

        ret = 0;
unlock:
        rcu_read_unlock();

        return ret;
}
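
/*
 * Layout reminder (editorial): pgoff 0 maps the control page -- the
 * structure userspace reads data_head from (see perf_event.h) -- and
 * pgoff 1..N map the data pages. That is why only pgoff 0 may fault
 * writable above: userspace updates data_tail through it, while the
 * data pages stay read-only for the reader.
 */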

static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
{
        struct perf_buffer *buffer;

        buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
        perf_buffer_free(buffer);
}

static struct perf_buffer *perf_buffer_get(struct perf_event *event)
{
        struct perf_buffer *buffer;

        rcu_read_lock();
        buffer = rcu_dereference(event->buffer);
        if (buffer) {
                if (!atomic_inc_not_zero(&buffer->refcount))
                        buffer = NULL;
        }
        rcu_read_unlock();

        return buffer;
}

static void perf_buffer_put(struct perf_buffer *buffer)
{
        if (!atomic_dec_and_test(&buffer->refcount))
                return;

        call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
        struct perf_event *event = vma->vm_file->private_data;

        atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
        struct perf_event *event = vma->vm_file->private_data;

        if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
                unsigned long size = perf_data_size(event->buffer);
                struct user_struct *user = event->mmap_user;
                struct perf_buffer *buffer = event->buffer;

                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->locked_vm -= event->mmap_locked;
                rcu_assign_pointer(event->buffer, NULL);
                mutex_unlock(&event->mmap_mutex);

                perf_buffer_put(buffer);
                free_uid(user);
        }
}

static const struct vm_operations_struct perf_mmap_vmops = {
        .open           = perf_mmap_open,
        .close          = perf_mmap_close,
        .fault          = perf_mmap_fault,
        .page_mkwrite   = perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct perf_event *event = file->private_data;
        unsigned long user_locked, user_lock_limit;
        struct user_struct *user = current_user();
        unsigned long locked, lock_limit;
        struct perf_buffer *buffer;
        unsigned long vma_size;
        unsigned long nr_pages;
        long user_extra, extra;
        int ret = 0, flags = 0;

        /*
         * Don't allow mmap() of inherited per-task counters. This would
         * create a performance issue due to all children writing to the
         * same buffer.
         */
        if (event->cpu == -1 && event->attr.inherit)
                return -EINVAL;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma_size = vma->vm_end - vma->vm_start;
        nr_pages = (vma_size / PAGE_SIZE) - 1;

        /*
         * If we have buffer pages ensure they're a power-of-two number, so we
         * can do bitmasks instead of modulo.
         */
        if (nr_pages != 0 && !is_power_of_2(nr_pages))
                return -EINVAL;

        if (vma_size != PAGE_SIZE * (1 + nr_pages))
                return -EINVAL;

        if (vma->vm_pgoff != 0)
                return -EINVAL;

        WARN_ON_ONCE(event->ctx->parent_ctx);
        mutex_lock(&event->mmap_mutex);
        if (event->buffer) {
                if (event->buffer->nr_pages == nr_pages)
                        atomic_inc(&event->buffer->refcount);
                else
                        ret = -EINVAL;
                goto unlock;
        }

        user_extra = nr_pages + 1;
        user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

        /*
         * Increase the limit linearly with more CPUs:
         */
        user_lock_limit *= num_online_cpus();

        user_locked = atomic_long_read(&user->locked_vm) + user_extra;

        extra = 0;
        if (user_locked > user_lock_limit)
                extra = user_locked - user_lock_limit;

        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
        locked = vma->vm_mm->locked_vm + extra;

        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
                !capable(CAP_IPC_LOCK)) {
                ret = -EPERM;
                goto unlock;
        }

        WARN_ON(event->buffer);

        if (vma->vm_flags & VM_WRITE)
                flags |= PERF_BUFFER_WRITABLE;

        buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
                                   event->cpu, flags);
        if (!buffer) {
                ret = -ENOMEM;
                goto unlock;
        }
        rcu_assign_pointer(event->buffer, buffer);

        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
        event->mmap_user = get_current_user();
        vma->vm_mm->locked_vm += event->mmap_locked;

unlock:
        if (!ret)
                atomic_inc(&event->mmap_count);
        mutex_unlock(&event->mmap_mutex);

        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &perf_mmap_vmops;

        return ret;
}
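
/*
 * Illustrative userspace sketch (editorial; assumes an event fd from
 * sys_perf_event_open() and a 4KiB page size). The mapping must be one
 * control page plus a power-of-two number of data pages:
 *
 *      size_t len = (1 + 8) * 4096;
 *      void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *
 * A length that is not 1 + 2^n pages, a non-zero offset, or a private
 * mapping all fail with -EINVAL per the checks above.
 */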

static int perf_fasync(int fd, struct file *filp, int on)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct perf_event *event = filp->private_data;
        int retval;

        mutex_lock(&inode->i_mutex);
        retval = fasync_helper(fd, filp, on, &event->fasync);
        mutex_unlock(&inode->i_mutex);

        if (retval < 0)
                return retval;

        return 0;
}

static const struct file_operations perf_fops = {
        .llseek                 = no_llseek,
        .release                = perf_release,
        .read                   = perf_read,
        .poll                   = perf_poll,
        .unlocked_ioctl         = perf_ioctl,
        .compat_ioctl           = perf_ioctl,
        .mmap                   = perf_mmap,
        .fasync                 = perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
        wake_up_all(&event->waitq);

        if (event->pending_kill) {
                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
}

static void perf_pending_event(struct irq_work *entry)
{
        struct perf_event *event = container_of(entry,
                        struct perf_event, pending);

        if (event->pending_disable) {
                event->pending_disable = 0;
                __perf_event_disable(event);
        }

        if (event->pending_wakeup) {
                event->pending_wakeup = 0;
                perf_event_wakeup(event);
        }
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
        perf_guest_cbs = cbs;
        return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
        perf_guest_cbs = NULL;
        return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

/*
 * Output
 */
static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
                              unsigned long offset, unsigned long head)
{
        unsigned long mask;

        if (!buffer->writable)
                return true;

        mask = perf_data_size(buffer) - 1;

        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;

        if ((int)(head - offset) < 0)
                return false;

        return true;
}
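
/*
 * Worked example of the masking above (editorial): with a 32KiB data
 * area, mask is 0x7fff. If tail = 0x1000, offset = 0x7000 and the new
 * record would push head to 0x9000, then relative to tail offset
 * becomes 0x6000 and head becomes 0x8000 & 0x7fff == 0; since
 * (int)(0 - 0x6000) < 0, the write would overrun unread data and is
 * rejected.
 */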

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->buffer->poll, POLL_IN);

        if (handle->nmi) {
                handle->event->pending_wakeup = 1;
                irq_work_queue(&handle->event->pending);
        } else
                perf_event_wakeup(handle->event);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct perf_buffer *buffer = handle->buffer;

        preempt_disable();
        local_inc(&buffer->nest);
        handle->wakeup = local_read(&buffer->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct perf_buffer *buffer = handle->buffer;
        unsigned long head;

again:
        head = local_read(&buffer->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&buffer->nest))
                goto out;

        /*
         * Publish the known good head. Rely on the full barrier implied
         * by local_dec_and_test() to order the buffer->head read and this
         * write.
         */
        buffer->user_page->data_head = head;

        /*
         * Now check if we missed an update -- rely on the (compiler)
         * barrier in local_dec_and_test() to re-read buffer->head.
         */
        if (unlikely(head != local_read(&buffer->head))) {
                local_inc(&buffer->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
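
/*
 * Illustrative nesting trace (editorial; one CPU, NMI interrupting an
 * IRQ-context writer):
 *
 *      IRQ:    get_handle()    -> nest == 1
 *      NMI:    get_handle()    -> nest == 2, writes its record
 *      NMI:    put_handle()    -> local_dec_and_test() fails, no publish
 *      IRQ:    put_handle()    -> nest drops to 0, publishes data_head
 *
 * If the NMI advanced buffer->head after the outer put_handle() already
 * read it, the head != local_read(&buffer->head) re-check above catches
 * the stale value and loops to publish the newer head.
 */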

__always_inline void perf_output_copy(struct perf_output_handle *handle,
                      const void *buf, unsigned int len)
{
        do {
                unsigned long size = min_t(unsigned long, handle->size, len);

                memcpy(handle->addr, buf, size);

                len -= size;
                handle->addr += size;
                buf += size;
                handle->size -= size;
                if (!handle->size) {
                        struct perf_buffer *buffer = handle->buffer;

                        handle->page++;
                        handle->page &= buffer->nr_pages - 1;
                        handle->addr = buffer->data_pages[handle->page];
                        handle->size = PAGE_SIZE << page_order(buffer);
                }
        } while (len);
}

static void __perf_event_header__init_id(struct perf_event_header *header,
                                         struct perf_sample_data *data,
                                         struct perf_event *event)
{
        u64 sample_type = event->attr.sample_type;

        data->type = sample_type;
        header->size += event->id_header_size;

        if (sample_type & PERF_SAMPLE_TID) {
                /* namespace issues */
                data->tid_entry.pid = perf_event_pid(event, current);
                data->tid_entry.tid = perf_event_tid(event, current);
        }

        if (sample_type & PERF_SAMPLE_TIME)
                data->time = perf_clock();

        if (sample_type & PERF_SAMPLE_ID)
                data->id = primary_event_id(event);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                data->stream_id = event->id;

        if (sample_type & PERF_SAMPLE_CPU) {
                data->cpu_entry.cpu      = raw_smp_processor_id();
                data->cpu_entry.reserved = 0;
        }
}

static void perf_event_header__init_id(struct perf_event_header *header,
                                       struct perf_sample_data *data,
                                       struct perf_event *event)
{
        if (event->attr.sample_id_all)
                __perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
                                           struct perf_sample_data *data)
{
        u64 sample_type = data->type;

        if (sample_type & PERF_SAMPLE_TID)
                perf_output_put(handle, data->tid_entry);

        if (sample_type & PERF_SAMPLE_TIME)
                perf_output_put(handle, data->time);

        if (sample_type & PERF_SAMPLE_ID)
                perf_output_put(handle, data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                perf_output_put(handle, data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                perf_output_put(handle, data->cpu_entry);
}

static void perf_event__output_id_sample(struct perf_event *event,
                                         struct perf_output_handle *handle,
                                         struct perf_sample_data *sample)
{
        if (event->attr.sample_id_all)
                __perf_event__output_id_sample(handle, sample);
}
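
/*
 * With attr.sample_id_all set, every non-sample record thus grows a
 * trailer whose fields appear in the fixed order emitted above.
 * Illustrative layout (editorial) for
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME:
 *
 *      struct perf_event_header header;
 *      ... record-specific body ...
 *      { u32 pid, tid; }
 *      u64 time;
 *
 * event->id_header_size (computed elsewhere) accounts for exactly
 * these extra bytes.
 */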

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size,
                      int nmi, int sample)
{
        struct perf_buffer *buffer;
        unsigned long tail, offset, head;
        int have_lost;
        struct perf_sample_data sample_data;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        buffer = rcu_dereference(event->buffer);
        if (!buffer)
                goto out;

        handle->buffer  = buffer;
        handle->event   = event;
        handle->nmi     = nmi;
        handle->sample  = sample;

        if (!buffer->nr_pages)
                goto out;

        have_lost = local_read(&buffer->lost);
        if (have_lost) {
                lost_event.header.size = sizeof(lost_event);
                perf_event_header__init_id(&lost_event.header, &sample_data,
                                           event);
                size += lost_event.header.size;
        }

        perf_output_get_handle(handle);

        do {
                /*
                 * Userspace could choose to issue a mb() before updating the
                 * tail pointer, so that all reads are completed before the
                 * write is issued.
                 */
                tail = ACCESS_ONCE(buffer->user_page->data_tail);
                smp_rmb();
                offset = head = local_read(&buffer->head);
                head += size;
                if (unlikely(!perf_output_space(buffer, tail, offset, head)))
                        goto fail;
        } while (local_cmpxchg(&buffer->head, offset, head) != offset);

        if (head - local_read(&buffer->wakeup) > buffer->watermark)
                local_add(buffer->watermark, &buffer->wakeup);

        handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
        handle->page &= buffer->nr_pages - 1;
        handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
        handle->addr = buffer->data_pages[handle->page];
        handle->addr += handle->size;
        handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;

        if (have_lost) {
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&buffer->lost, 0);

                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&buffer->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}
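
/*
 * Typical in-kernel usage (editorial sketch, mirroring the record
 * emitters later in this file): reserve space, copy the record, then
 * publish. Returns non-zero when there is no buffer or no space, in
 * which case nothing must be written:
 *
 *      struct perf_output_handle handle;
 *
 *      if (perf_output_begin(&handle, event, rec.header.size, 0, 0))
 *              return;
 *      perf_output_put(&handle, rec);
 *      perf_output_end(&handle);
 */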

void perf_output_end(struct perf_output_handle *handle)
{
        struct perf_event *event = handle->event;
        struct perf_buffer *buffer = handle->buffer;

        int wakeup_events = event->attr.wakeup_events;

        if (handle->sample && wakeup_events) {
                int events = local_inc_return(&buffer->events);
                if (events >= wakeup_events) {
                        local_sub(wakeup_events, &buffer->events);
                        local_inc(&buffer->wakeup);
                }
        }

        perf_output_put_handle(handle);
        rcu_read_unlock();
}

static void perf_output_read_one(struct perf_output_handle *handle,
                                 struct perf_event *event,
                                 u64 enabled, u64 running)
{
        u64 read_format = event->attr.read_format;
        u64 values[4];
        int n = 0;

        values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
                values[n++] = enabled +
                        atomic64_read(&event->child_total_time_enabled);
        }
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
                values[n++] = running +
                        atomic64_read(&event->child_total_time_running);
        }
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);

        perf_output_copy(handle, values, n * sizeof(u64));
}
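
/*
 * Resulting wire format (editorial illustration), for
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID:
 *
 *      u64 value;              (perf_event_count(), incl. children)
 *      u64 time_enabled;       (incl. children)
 *      u64 id;
 *
 * i.e. three u64s, in the order the format bits are handled above.
 */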

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
                                   struct perf_event *event,
                                   u64 enabled, u64 running)
{
        struct perf_event *leader = event->group_leader, *sub;
        u64 read_format = event->attr.read_format;
        u64 values[5];
        int n = 0;

        values[n++] = 1 + leader->nr_siblings;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                values[n++] = enabled;

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                values[n++] = running;

        if (leader != event)
                leader->pmu->read(leader);

        values[n++] = perf_event_count(leader);
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);

        perf_output_copy(handle, values, n * sizeof(u64));

        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                n = 0;

                if (sub != event)
                        sub->pmu->read(sub);

                values[n++] = perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);

                perf_output_copy(handle, values, n * sizeof(u64));
        }
}
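
/*
 * Illustrative group layout (editorial) for a leader with two siblings
 * and read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID:
 *
 *      u64 nr;                 (3 == 1 + nr_siblings)
 *      { u64 value, id; }      (leader)
 *      { u64 value, id; }      (sibling 1)
 *      { u64 value, id; }      (sibling 2)
 *
 * time_enabled/time_running, when requested, are emitted once after
 * nr, not once per group member.
 */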

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
                                 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
                             struct perf_event *event)
{
        u64 enabled = 0, running = 0, now, ctx_time;
        u64 read_format = event->attr.read_format;

        /*
         * Compute total_time_enabled and total_time_running based on
         * snapshot values taken when the event was last scheduled in.
         *
         * We cannot simply call update_context_time() here because of
         * locking issues: we may be called from NMI context.
         */
        if (read_format & PERF_FORMAT_TOTAL_TIMES) {
                now = perf_clock();
                ctx_time = event->shadow_ctx_time + now;
                enabled = ctx_time - event->tstamp_enabled;
                running = ctx_time - event->tstamp_running;
        }

        if (event->attr.read_format & PERF_FORMAT_GROUP)
                perf_output_read_group(handle, event, enabled, running);
        else
                perf_output_read_one(handle, event, enabled, running);
}

void perf_output_sample(struct perf_output_handle *handle,
                        struct perf_event_header *header,
                        struct perf_sample_data *data,
                        struct perf_event *event)
{
        u64 sample_type = data->type;

        perf_output_put(handle, *header);

        if (sample_type & PERF_SAMPLE_IP)
                perf_output_put(handle, data->ip);

        if (sample_type & PERF_SAMPLE_TID)
                perf_output_put(handle, data->tid_entry);

        if (sample_type & PERF_SAMPLE_TIME)
                perf_output_put(handle, data->time);

        if (sample_type & PERF_SAMPLE_ADDR)
                perf_output_put(handle, data->addr);

        if (sample_type & PERF_SAMPLE_ID)
                perf_output_put(handle, data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                perf_output_put(handle, data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                perf_output_put(handle, data->cpu_entry);

        if (sample_type & PERF_SAMPLE_PERIOD)
                perf_output_put(handle, data->period);

        if (sample_type & PERF_SAMPLE_READ)
                perf_output_read(handle, event);

        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                if (data->callchain) {
                        int size = 1 + data->callchain->nr;

                        perf_output_copy(handle, data->callchain,
                                         size * sizeof(u64));
                } else {
                        u64 nr = 0;
                        perf_output_put(handle, nr);
                }
        }

        if (sample_type & PERF_SAMPLE_RAW) {
                if (data->raw) {
                        perf_output_put(handle, data->raw->size);
                        perf_output_copy(handle, data->raw->data,
                                         data->raw->size);
                } else {
                        struct {
                                u32     size;
                                u32     data;
                        } raw = {
                                .size = sizeof(u32),
                                .data = 0,
                        };
                        perf_output_put(handle, raw);
                }
        }
}

void perf_prepare_sample(struct perf_event_header *header,
                         struct perf_sample_data *data,
                         struct perf_event *event,
                         struct pt_regs *regs)
{
        u64 sample_type = event->attr.sample_type;

        header->type = PERF_RECORD_SAMPLE;
        header->size = sizeof(*header) + event->header_size;

        header->misc = 0;
        header->misc |= perf_misc_flags(regs);

        __perf_event_header__init_id(header, data, event);

        if (sample_type & PERF_SAMPLE_IP)
                data->ip = perf_instruction_pointer(regs);

        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                int size = 1;

                data->callchain = perf_callchain(regs);

                if (data->callchain)
                        size += data->callchain->nr;

                header->size += size * sizeof(u64);
        }

        if (sample_type & PERF_SAMPLE_RAW) {
                int size = sizeof(u32);

                if (data->raw)
                        size += data->raw->size;
                else
                        size += sizeof(u32);

                WARN_ON_ONCE(size & (sizeof(u64)-1));
                header->size += size;
        }
}

static void perf_event_output(struct perf_event *event, int nmi,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct perf_output_handle handle;
        struct perf_event_header header;

        /* protect the callchain buffers */
        rcu_read_lock();

        perf_prepare_sample(&header, data, event, regs);

        if (perf_output_begin(&handle, event, header.size, nmi, 1))
                goto exit;

        perf_output_sample(&handle, &header, data, event);

        perf_output_end(&handle);

exit:
        rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
        struct perf_event_header        header;

        u32                             pid;
        u32                             tid;
};

static void
perf_event_read_event(struct perf_event *event,
                        struct task_struct *task)
{
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        struct perf_read_event read_event = {
                .header = {
                        .type = PERF_RECORD_READ,
                        .misc = 0,
                        .size = sizeof(read_event) + event->read_size,
                },
                .pid = perf_event_pid(event, task),
                .tid = perf_event_tid(event, task),
        };
        int ret;

        perf_event_header__init_id(&read_event.header, &sample, event);
        ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
        if (ret)
                return;

        perf_output_put(&handle, read_event);
        perf_output_read(&handle, event);
        perf_event__output_id_sample(event, &handle, &sample);

        perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
        struct task_struct              *task;
        struct perf_event_context       *task_ctx;

        struct {
                struct perf_event_header        header;

                u32                             pid;
                u32                             ppid;
                u32                             tid;
                u32                             ptid;
                u64                             time;
        } event_id;
};

static void perf_event_task_output(struct perf_event *event,
                                   struct perf_task_event *task_event)
{
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        struct task_struct *task = task_event->task;
        int ret, size = task_event->event_id.header.size;

        perf_event_header__init_id(&task_event->event_id.header, &sample, event);

        ret = perf_output_begin(&handle, event,
                                task_event->event_id.header.size, 0, 0);
        if (ret)
                goto out;

        task_event->event_id.pid = perf_event_pid(event, task);
        task_event->event_id.ppid = perf_event_pid(event, current);

        task_event->event_id.tid = perf_event_tid(event, task);
        task_event->event_id.ptid = perf_event_tid(event, current);

        perf_output_put(&handle, task_event->event_id);

        perf_event__output_id_sample(event, &handle, &sample);

        perf_output_end(&handle);
out:
        task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
{
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;

        if (!event_filter_match(event))
                return 0;

        if (event->attr.comm || event->attr.mmap ||
            event->attr.mmap_data || event->attr.task)
                return 1;

        return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
                                struct perf_task_event *task_event)
{
        struct perf_event *event;

        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_task_match(event))
                        perf_event_task_output(event, task_event);
        }
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
        struct pmu *pmu;
        int ctxn;

        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
                if (cpuctx->active_pmu != pmu)
                        goto next;
                perf_event_task_ctx(&cpuctx->ctx, task_event);

                ctx = task_event->task_ctx;
                if (!ctx) {
                        ctxn = pmu->task_ctx_nr;
                        if (ctxn < 0)
                                goto next;
                        ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                }
                if (ctx)
                        perf_event_task_ctx(ctx, task_event);
next:
                put_cpu_ptr(pmu->pmu_cpu_context);
        }
        rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
                            struct perf_event_context *task_ctx,
                            int new)
{
        struct perf_task_event task_event;

        if (!atomic_read(&nr_comm_events) &&
            !atomic_read(&nr_mmap_events) &&
            !atomic_read(&nr_task_events))
                return;

        task_event = (struct perf_task_event){
                .task     = task,
                .task_ctx = task_ctx,
                .event_id    = {
                        .header = {
                                .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
                                .misc = 0,
                                .size = sizeof(task_event.event_id),
                        },
                        /* .pid  */
                        /* .ppid */
                        /* .tid  */
                        /* .ptid */
                        .time = perf_clock(),
                },
        };

        perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
        perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
        struct task_struct      *task;
        char                    *comm;
        int                     comm_size;

        struct {
                struct perf_event_header        header;

                u32                             pid;
                u32                             tid;
        } event_id;
};

static void perf_event_comm_output(struct perf_event *event,
                                   struct perf_comm_event *comm_event)
{
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = comm_event->event_id.header.size;
        int ret;

        perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
        ret = perf_output_begin(&handle, event,
                                comm_event->event_id.header.size, 0, 0);

        if (ret)
                goto out;

        comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
        comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

        perf_output_put(&handle, comm_event->event_id);
        perf_output_copy(&handle, comm_event->comm,
                         comm_event->comm_size);

        perf_event__output_id_sample(event, &handle, &sample);

        perf_output_end(&handle);
out:
        comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
{
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;

        if (!event_filter_match(event))
                return 0;

        if (event->attr.comm)
                return 1;

        return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
                                struct perf_comm_event *comm_event)
{
        struct perf_event *event;

        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_comm_match(event))
                        perf_event_comm_output(event, comm_event);
        }
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
        char comm[TASK_COMM_LEN];
        unsigned int size;
        struct pmu *pmu;
        int ctxn;

        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
        size = ALIGN(strlen(comm)+1, sizeof(u64));

        comm_event->comm = comm;
        comm_event->comm_size = size;

        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
                if (cpuctx->active_pmu != pmu)
                        goto next;
                perf_event_comm_ctx(&cpuctx->ctx, comm_event);

                ctxn = pmu->task_ctx_nr;
                if (ctxn < 0)
                        goto next;

                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                if (ctx)
                        perf_event_comm_ctx(ctx, comm_event);
next:
                put_cpu_ptr(pmu->pmu_cpu_context);
        }
        rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
        struct perf_comm_event comm_event;
        struct perf_event_context *ctx;
        int ctxn;

        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (!ctx)
                        continue;

                perf_event_enable_on_exec(ctx);
        }

        if (!atomic_read(&nr_comm_events))
                return;

        comm_event = (struct perf_comm_event){
                .task   = task,
                /* .comm      */
                /* .comm_size */
                .event_id  = {
                        .header = {
                                .type = PERF_RECORD_COMM,
                                .misc = 0,
                                /* .size */
                        },
                        /* .pid */
                        /* .tid */
                },
        };

        perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
        struct vm_area_struct   *vma;

        const char              *file_name;
        int                     file_size;

        struct {
                struct perf_event_header        header;

                u32                             pid;
                u32                             tid;
                u64                             start;
                u64                             len;
                u64                             pgoff;
        } event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
                                   struct perf_mmap_event *mmap_event)
{
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
        int ret;

        perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
        ret = perf_output_begin(&handle, event,
                                mmap_event->event_id.header.size, 0, 0);
        if (ret)
                goto out;

        mmap_event->event_id.pid = perf_event_pid(event, current);
        mmap_event->event_id.tid = perf_event_tid(event, current);

        perf_output_put(&handle, mmap_event->event_id);
        perf_output_copy(&handle, mmap_event->file_name,
                         mmap_event->file_size);

        perf_event__output_id_sample(event, &handle, &sample);

        perf_output_end(&handle);
out:
        mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
                                 struct perf_mmap_event *mmap_event,
                                 int executable)
{
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;

        if (!event_filter_match(event))
                return 0;

        if ((!executable && event->attr.mmap_data) ||
            (executable && event->attr.mmap))
                return 1;

        return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
                                struct perf_mmap_event *mmap_event,
                                int executable)
{
        struct perf_event *event;

        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_mmap_match(event, mmap_event, executable))
                        perf_event_mmap_output(event, mmap_event);
        }
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
        struct vm_area_struct *vma = mmap_event->vma;
        struct file *file = vma->vm_file;
        unsigned int size;
        char tmp[16];
        char *buf = NULL;
        const char *name;
        struct pmu *pmu;
        int ctxn;

        memset(tmp, 0, sizeof(tmp));

        if (file) {
                /*
                 * d_path() works from the end of the buffer backwards, so we
                 * need to add enough zero bytes after the string to handle
                 * the 64bit alignment we do later.
                 */
                buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
                if (!buf) {
                        name = strncpy(tmp, "//enomem", sizeof(tmp));
                        goto got_name;
                }
                name = d_path(&file->f_path, buf, PATH_MAX);
                if (IS_ERR(name)) {
                        name = strncpy(tmp, "//toolong", sizeof(tmp));
                        goto got_name;
                }
        } else {
                if (arch_vma_name(mmap_event->vma)) {
                        name = strncpy(tmp, arch_vma_name(mmap_event->vma),
                                       sizeof(tmp));
                        goto got_name;
                }

                if (!vma->vm_mm) {
                        name = strncpy(tmp, "[vdso]", sizeof(tmp));
                        goto got_name;
                } else if (vma->vm_start <= vma->vm_mm->start_brk &&
                           vma->vm_end >= vma->vm_mm->brk) {
                        name = strncpy(tmp, "[heap]", sizeof(tmp));
                        goto got_name;
                } else if (vma->vm_start <= vma->vm_mm->start_stack &&
                           vma->vm_end >= vma->vm_mm->start_stack) {
                        name = strncpy(tmp, "[stack]", sizeof(tmp));
                        goto got_name;
                }

                name = strncpy(tmp, "//anon", sizeof(tmp));
                goto got_name;
        }

got_name:
        size = ALIGN(strlen(name)+1, sizeof(u64));

        mmap_event->file_name = name;
        mmap_event->file_size = size;

        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
                if (cpuctx->active_pmu != pmu)
                        goto next;
                perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);

                ctxn = pmu->task_ctx_nr;
                if (ctxn < 0)
                        goto next;

                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                if (ctx) {
                        perf_event_mmap_ctx(ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);
                }
next:
                put_cpu_ptr(pmu->pmu_cpu_context);
        }
        rcu_read_unlock();

        kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
        struct perf_mmap_event mmap_event;

        if (!atomic_read(&nr_mmap_events))
                return;

        mmap_event = (struct perf_mmap_event){
                .vma    = vma,
                /* .file_name */
                /* .file_size */
                .event_id  = {
                        .header = {
                                .type = PERF_RECORD_MMAP,
                                .misc = PERF_RECORD_MISC_USER,
                                /* .size */
                        },
                        /* .pid */
                        /* .tid */
                        .start  = vma->vm_start,
                        .len    = vma->vm_end - vma->vm_start,
                        .pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
                },
        };

        perf_event_mmap_event(&mmap_event);
}
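
/*
 * Illustrative PERF_RECORD_MMAP payload for an executable file mapping
 * (editorial; all values made up):
 *
 *      pid = 1234, tid = 1234
 *      start = 0x7f0000000000, len = 0x200000, pgoff = 0
 *      file_name = "/lib/libfoo.so\0..." (NUL-padded up to a u64
 *      boundary, per the ALIGN() in perf_event_mmap_event())
 */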
4336
4337/*
4338 * IRQ throttle logging
4339 */
4340
4341static void perf_log_throttle(struct perf_event *event, int enable)
4342{
4343 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004344 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004345 int ret;
4346
4347 struct {
4348 struct perf_event_header header;
4349 u64 time;
4350 u64 id;
4351 u64 stream_id;
4352 } throttle_event = {
4353 .header = {
4354 .type = PERF_RECORD_THROTTLE,
4355 .misc = 0,
4356 .size = sizeof(throttle_event),
4357 },
4358 .time = perf_clock(),
4359 .id = primary_event_id(event),
4360 .stream_id = event->id,
4361 };
4362
4363 if (enable)
4364 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4365
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004366 perf_event_header__init_id(&throttle_event.header, &sample, event);
4367
4368 ret = perf_output_begin(&handle, event,
4369 throttle_event.header.size, 1, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004370 if (ret)
4371 return;
4372
4373 perf_output_put(&handle, throttle_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004374 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004375 perf_output_end(&handle);
4376}
4377
4378/*
4379 * Generic event overflow handling, sampling.
4380 */
4381
4382static int __perf_event_overflow(struct perf_event *event, int nmi,
4383 int throttle, struct perf_sample_data *data,
4384 struct pt_regs *regs)
4385{
4386 int events = atomic_read(&event->event_limit);
4387 struct hw_perf_event *hwc = &event->hw;
4388 int ret = 0;
4389
Peter Zijlstra96398822010-11-24 18:55:29 +01004390 /*
4391 * Non-sampling counters might still use the PMI to fold short
4392 * hardware counters, ignore those.
4393 */
4394 if (unlikely(!is_sampling_event(event)))
4395 return 0;
4396
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004397 if (!throttle) {
4398 hwc->interrupts++;
4399 } else {
4400 if (hwc->interrupts != MAX_INTERRUPTS) {
4401 hwc->interrupts++;
4402 if (HZ * hwc->interrupts >
4403 (u64)sysctl_perf_event_sample_rate) {
4404 hwc->interrupts = MAX_INTERRUPTS;
4405 perf_log_throttle(event, 0);
4406 ret = 1;
4407 }
4408 } else {
4409 /*
4410 * Keep re-disabling events even though on the previous
4411 * pass we disabled it - just in case we raced with a
4412 * sched-in and the event got enabled again:
4413 */
4414 ret = 1;
4415 }
4416 }
4417
4418 if (event->attr.freq) {
4419 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01004420 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004421
Peter Zijlstraabd50712010-01-26 18:50:16 +01004422 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004423
Peter Zijlstraabd50712010-01-26 18:50:16 +01004424 if (delta > 0 && delta < 2*TICK_NSEC)
4425 perf_adjust_period(event, delta, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004426 }
4427
4428 /*
4429 * XXX event_limit might not quite work as expected on inherited
4430 * events
4431 */
4432
4433 event->pending_kill = POLL_IN;
4434 if (events && atomic_dec_and_test(&event->event_limit)) {
4435 ret = 1;
4436 event->pending_kill = POLL_HUP;
4437 if (nmi) {
4438 event->pending_disable = 1;
Peter Zijlstrae360adb2010-10-14 14:01:34 +08004439 irq_work_queue(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004440 } else
4441 perf_event_disable(event);
4442 }
4443
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004444 if (event->overflow_handler)
4445 event->overflow_handler(event, nmi, data, regs);
4446 else
4447 perf_event_output(event, nmi, data, regs);
4448
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004449 return ret;
4450}
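
/*
 * Worked numbers for the throttle test above (illustrative only):
 * hwc->interrupts counts overflow interrupts since it was last reset
 * from the regular timer tick, so "HZ * hwc->interrupts" extrapolates
 * a per-second interrupt rate.  E.g. with HZ == 250 and the default
 * sysctl_perf_event_sample_rate == 100000, an event gets throttled
 * once it takes more than 100000 / 250 == 400 overflow interrupts
 * within a single tick:
 *
 *	250 * 401 == 100250 >  100000	-> throttle, ret = 1
 *	250 * 400 == 100000 !> 100000	-> keep sampling
 */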

int perf_event_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each context */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period counter
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
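
/*
 * Worked example for the period arithmetic above (illustrative only):
 * with sample_period == 100, period_left normally sits in [-100, 0].
 * Suppose enough events were added that period_left reached 250, i.e.
 * the trigger point was overshot by two and a half periods.  Then:
 *
 *	nr     = (100 + 250) / 100 == 3		overflows to report
 *	offset = 3 * 100	   == 300
 *	val    = 250 - 300	   == -50	back in [-100, 0]
 *
 * A val of exactly 0 still yields nr == 1 and resets period_left to
 * -sample_period.  The cmpxchg loop retries if some other context
 * moved period_left underneath us.
 */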

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       int nmi, struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	/* a stopped event must not count samples */
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr, int nmi,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, nmi, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto out;	/* recursion: still balance the preempt count */

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);
out:
	preempt_enable_notrace();
}
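
/*
 * Illustrative call site (a sketch, not part of this file): kernel code
 * reports a software event through __perf_sw_event(), usually via the
 * perf_sw_event() wrapper from <linux/perf_event.h>.  The page fault
 * path, for instance, does roughly:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * which adds one to every matching PERF_TYPE_SOFTWARE event on this
 * CPU and runs the sampling path for those that sample.
 */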

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
{
	struct swevent_hlist *hlist;

	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
	kfree(hlist);
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	jump_label_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		jump_label_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};
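
/*
 * Sketch of how the struct pmu callbacks above are driven (assumed
 * behaviour of the generic event scheduling code, which lives earlier
 * in this file): ->add() is invoked when the event is scheduled onto
 * this CPU and hashes it into the per-cpu swevent hlist, ->del()
 * unhooks it again.  ->start()/->stop() merely toggle PERF_HES_STOPPED,
 * which perf_exclude_event() tests before a sample is counted:
 *
 *	event->pmu->add(event, PERF_EF_START);	  scheduled in, counting
 *	event->pmu->stop(event, PERF_EF_UPDATE);  paused
 *	event->pmu->start(event, 0);		  counting again
 *	event->pmu->del(event, 0);		  scheduled out
 */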

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, 1, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
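
/*
 * Illustrative caller (a rough sketch of the tracepoint glue in
 * kernel/trace/trace_event_perf.c, simplified): a tracepoint probe
 * builds its raw record in a per-cpu buffer, grabs a recursion
 * context, then hands everything to perf_tp_event():
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in *entry ...
 *	perf_tp_event(addr, count, entry, size, regs, head, rctx);
 *
 * Note that perf_tp_event() drops the recursion context itself.
 */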

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, 0, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}
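
/*
 * Illustrative numbers for the clamping above (a sketch): the clock
 * events interpret sample_period in nanoseconds, and both the start
 * and the re-arm paths clamp the hrtimer interval to at least 10000 ns.
 * A request of sample_period == 1000 (1 us) therefore still fires at
 * most every 10 us, bounding the timer rate at roughly 100 kHz per
 * event.
 */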

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};
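
/*
 * Illustrative user-space counterpart (a sketch, not kernel code and
 * not part of this file): counting CPU wall time with the pmu above
 * needs only a minimal perf_event_attr; there is no glibc wrapper for
 * the syscall:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	unsigned long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	... run the workload ...
 *	read(fd, &count, sizeof(count));
 *
 * pid == 0, cpu == -1 measures the calling thread on any CPU; the
 * value read back is the accumulated local_clock() time, in
 * nanoseconds, for which the event was scheduled in.
 */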

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(event->ctx);
		time = event->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - event->ctx->timestamp;
		time = event->ctx->time + delta;
	}

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
};

static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}

static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
{
	struct pmu *pmu;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}

	return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->active_pmu == old_pmu)
			cpuctx->active_pmu = pmu;
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * Like a real lame refcount.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}

static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
	__ATTR_RO(type),
	__ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}
static struct lock_class_key cpuctx_mutex;

int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
		if (!err)
			goto free_pdc;

		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
		if (err) {
			ret = err;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->active_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}
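
/*
 * Illustrative registration (a sketch; the "my_pmu_*" names are made up
 * for this example): a minimal software-style PMU only has to fill in
 * the scheduling callbacks and pick a context class, then register
 * itself.  Passing type == -1 asks the idr above to allocate a dynamic
 * type number, which user space can read back from
 * /sys/bus/event_source/devices/<name>/type:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */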

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}

struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu)
		goto unlock;

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		int ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	if (!overflow_handler && parent_event)
		overflow_handler = parent_event->overflow_handler;

	event->overflow_handler	= overflow_handler;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	event->pmu = pmu;

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_inc(&perf_task_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
	}

	return event;
}

static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
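
/*
 * Illustrative ABI cases for perf_copy_attr() (a sketch; suppose the
 * running kernel's sizeof(struct perf_event_attr) is 72 bytes):
 *
 *  - old user space passes size == 64: the remaining 8 bytes of *attr
 *    stay zeroed by the memset(), so missing fields default off;
 *  - new user space passes size == 96: the 24 trailing bytes are
 *    scanned, and the open succeeds only if all of them are zero,
 *    i.e. no unknown feature was actually requested (else -E2BIG);
 *  - size == 0 is treated as PERF_ATTR_SIZE_VER0 for binary compat.
 */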

static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct perf_buffer *buffer = NULL, *old_buffer = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu buffer, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the buffer we want to redirect to */
		buffer = perf_buffer_get(output_event);
		if (!buffer)
			goto unlock;
	}

	old_buffer = event->buffer;
	rcu_assign_pointer(event->buffer, buffer);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_buffer)
		perf_buffer_put(old_buffer);
out:
	return ret;
}
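
/*
 * Illustrative user-space use of output redirection (a sketch): two
 * events can share one ring buffer, either by opening the second event
 * with PERF_FLAG_FD_OUTPUT and the first event's fd as group_fd, or via
 * the PERF_EVENT_IOC_SET_OUTPUT ioctl:
 *
 *	int fd1 = syscall(__NR_perf_event_open, &attr1, pid, cpu, -1, 0);
 *	int fd2 = syscall(__NR_perf_event_open, &attr2, pid, cpu, -1, 0);
 *	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);
 *	... mmap() only fd1 and read both events' records there ...
 *
 * Redirection must happen before fd2 is mmap()ed, and both events have
 * to target the same CPU (or the same task context for cpu == -1), per
 * the checks above.
 */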

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_fd;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_event_remove_from_context(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_event_remove_from_context(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fput_light(group_file, fput_needed);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
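
/*
 * Illustrative userspace sketch (not kernel code, hence not compiled
 * here): glibc provides no wrapper for this syscall, so callers go
 * through syscall(2). This counts user-space instructions for the
 * calling thread; with exclude_kernel set, no CAP_SYS_ADMIN is needed
 * under the default paranoia level. Error handling is omitted.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* create the counter stopped */
	attr.exclude_kernel = 1;	/* count user space only */

	/* pid = 0: this task; cpu = -1: any cpu; no group, no flags. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to measure ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));	/* u64 count */
	close(fd);
	return 0;
}
#endif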

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
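
/*
 * Minimal in-kernel usage sketch, modelled loosely on the hard-lockup
 * watchdog; the attribute values and the callback body are illustrative
 * assumptions, not part of this file. The overflow handler can run from
 * NMI/IRQ context, so it must not sleep; release the event later with
 * perf_event_release_kernel().
 */
#if 0
static void example_overflow(struct perf_event *event, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* Invoked once per attr.sample_period cycles on this cpu. */
}

static struct perf_event *example_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.pinned		= 1,
		.sample_period	= 10000000,
	};

	/* task == NULL: a per-cpu counter bound to @cpu. */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						example_overflow);
}
#endif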

static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	struct perf_event *parent_event;

	perf_event_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped - but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
	task_ctx_sched_out(child_ctx, EVENT_ALL);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}
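
/*
 * Illustrative userspace sketch (not kernel code, hence not compiled
 * here) of the inheritance machinery above: with attr.inherit set, the
 * fork() path clones the parent's events into the child via
 * perf_event_init_task(), and sync_child_event() folds the child's
 * counts back into the parent at exit, so one read() covers the whole
 * task tree. Error handling is omitted; names are illustrative.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static unsigned long long count_task_tree(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;
	attr.inherit = 1;		/* clone into children at fork() */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	if (fork() == 0)		/* child counts with its own clone */
		_exit(0);
	wait(NULL);

	/* The exited child's count has been folded back into the parent. */
	read(fd, &count, sizeof(count));
	close(fd);
	return count;
}
#endif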

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
}
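
/*
 * Minimal sketch of registering an additional PMU with the core,
 * following the perf_pmu_register() calls in perf_event_init() above.
 * The callbacks here are do-nothing stubs and the names are illustrative
 * assumptions; a type of -1 asks the core to allocate a dynamic type id
 * from pmu_idr, exported to userspace via sysfs once pmu_dev_alloc()
 * below has run.
 */
#if 0
static int dummy_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;	/* not ours; let other PMUs have a look */
	return 0;
}

static int  dummy_add(struct perf_event *event, int flags)	{ return 0; }
static void dummy_del(struct perf_event *event, int flags)	{ }
static void dummy_start(struct perf_event *event, int flags)	{ }
static void dummy_stop(struct perf_event *event, int flags)	{ }
static void dummy_read(struct perf_event *event)		{ }

static struct pmu dummy_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= dummy_event_init,
	.add		= dummy_add,
	.del		= dummy_del,
	.start		= dummy_start,
	.stop		= dummy_stop,
	.read		= dummy_read,
};

static int __init dummy_pmu_init(void)
{
	return perf_pmu_register(&dummy_pmu, "dummy", -1);
}
#endif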

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);