/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

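/*
 * Does the reserved region [offset, head) still fit between the reader's
 * data_tail and the new head?  A buffer that is not user-writable has no
 * tail to honour, so it always has room.
 */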
static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!rb->writable)
		return true;

	mask = perf_data_size(rb) - 1;

	offset = (offset - tail) & mask;
	head = (head - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		irq_work_queue(&handle->event->pending);
	} else
		perf_event_wakeup(handle->event);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs, we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by local_dec_and_test() to order the rb->head read and this
	 * write.
	 */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update; rely on the (compiler)
	 * barrier in local_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

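/*
 * For reference, a user-space reader is expected to pair with the head
 * publishing above roughly like this (a sketch for orientation, not a
 * verbatim quote of any ABI header):
 *
 *	head = user_page->data_head;
 *	rmb();			- order the head read before the data reads
 *	... consume records in [data_tail, head) ...
 *	mb();			- complete the reads before publishing the tail
 *	user_page->data_tail = head;
 *
 * The smp_rmb() and the data_tail comment in perf_output_begin() below
 * rely on this pairing.
 */
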
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb = rb;
	handle->event = event;
	handle->nmi = nmi;
	handle->sample = sample;

	if (!rb->nr_pages)
		goto out;

	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer, so that all reads will be completed before
		 * the write is issued.
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

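	/*
	 * Translate the linear offset of the reservation into a data page
	 * index, the start address inside that page, and the number of
	 * bytes left in it.
	 */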
	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	__output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct ring_buffer *rb = handle->rb;

	if (handle->sample && !event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			int events = local_inc_return(&rb->events);
			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}

	perf_output_put_handle(handle);
	rcu_read_unlock();
}

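/*
 * Typical output-path usage (a sketch for orientation, not a quote from
 * an actual caller):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, size, nmi, sample))
 *		return;				- no buffer, or no room: -ENOSPC
 *	perf_output_put(&handle, record);	- or perf_output_copy()
 *	perf_output_end(&handle);
 */
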
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->writable = 1;

	atomic_set(&rb->refcount, 1);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

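/*
 * Allocate one zeroed page, preferably on the NUMA node of @cpu;
 * cpu == -1 expresses no node preference.
 */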
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

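/*
 * nr_pages is expected to be a power of two; perf_output_begin() relies
 * on that when it masks the page index with (nr_pages - 1).
 */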
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else

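/*
 * In the vmalloc case the user page and all data pages come from a
 * single contiguous vmalloc area; the whole data area is treated as one
 * high-order "page" via page_order(), with data_pages[0] pointing just
 * past the user page.
 */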
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(rb)))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = 1 << page_order(rb);

	base = rb->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

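/*
 * The actual freeing is deferred to process context via the workqueue,
 * since vfree() must not be called from interrupt context.
 */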
void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = 1;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif