Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
Steven Rostedta3583242008-11-11 15:01:42 -050019#include "trace.h"
20
21/* Global flag to disable all recording to ring buffers */
22static int ring_buffers_off __read_mostly;
23
24/**
25 * tracing_on - enable all tracing buffers
26 *
27 * This function enables all tracing buffers that may have been
28 * disabled with tracing_off.
29 */
30void tracing_on(void)
31{
32 ring_buffers_off = 0;
33}
34
35/**
36 * tracing_off - turn off all tracing buffers
37 *
38 * This function stops all tracing buffers from recording data.
39 * It does not disable any overhead the tracers themselves may
40 * be causing. This function simply causes all recording to
41 * the ring buffers to fail.
42 */
43void tracing_off(void)
44{
45 ring_buffers_off = 1;
46}
47
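/*
 * Illustrative sketch (not part of this file's API): the global switch
 * above is meant to bracket a region where recording must stop, for
 * example while dumping state from a crash path:
 *
 *	tracing_off();
 *	dump_crash_state();
 *	tracing_on();
 *
 * dump_crash_state() is a hypothetical helper. Note that tracing_off()
 * only makes writes to the ring buffers fail; the tracers themselves
 * keep running and keep their overhead.
 */
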
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040048/* Up this if you want to test the TIME_EXTENTS and normalization */
49#define DEBUG_SHIFT 0
50
51/* FIXME!!! */
52u64 ring_buffer_time_stamp(int cpu)
53{
54 /* shift to debug/test normalization and TIME_EXTENTS */
55 return sched_clock() << DEBUG_SHIFT;
56}
57
58void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
59{
60 /* Just stupid testing the normalize function and deltas */
61 *ts >>= DEBUG_SHIFT;
62}
63
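/*
 * Illustrative sketch: the two helpers above are meant to be used as a
 * pair; a raw stamp taken on the write side is normalized before it is
 * reported to the user:
 *
 *	u64 ts = ring_buffer_time_stamp(cpu);
 *	ring_buffer_normalize_time_stamp(cpu, &ts);
 */
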
64#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
65#define RB_ALIGNMENT_SHIFT 2
66#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
67#define RB_MAX_SMALL_DATA 28
68
69enum {
70 RB_LEN_TIME_EXTEND = 8,
71 RB_LEN_TIME_STAMP = 16,
72};
73
74/* inline for ring buffer fast paths */
75static inline unsigned
76rb_event_length(struct ring_buffer_event *event)
77{
78 unsigned length;
79
80 switch (event->type) {
81 case RINGBUF_TYPE_PADDING:
82 /* undefined */
83 return -1;
84
85 case RINGBUF_TYPE_TIME_EXTEND:
86 return RB_LEN_TIME_EXTEND;
87
88 case RINGBUF_TYPE_TIME_STAMP:
89 return RB_LEN_TIME_STAMP;
90
91 case RINGBUF_TYPE_DATA:
92 if (event->len)
93 length = event->len << RB_ALIGNMENT_SHIFT;
94 else
95 length = event->array[0];
96 return length + RB_EVNT_HDR_SIZE;
97 default:
98 BUG();
99 }
100 /* not hit */
101 return 0;
102}
103
104/**
105 * ring_buffer_event_length - return the length of the event
106 * @event: the event to get the length of
107 */
108unsigned ring_buffer_event_length(struct ring_buffer_event *event)
109{
110 return rb_event_length(event);
111}
112
113/* inline for ring buffer fast paths */
114static inline void *
115rb_event_data(struct ring_buffer_event *event)
116{
117 BUG_ON(event->type != RINGBUF_TYPE_DATA);
118 /* If length is in len field, then array[0] has the data */
119 if (event->len)
120 return (void *)&event->array[0];
121 /* Otherwise length is in array[0] and array[1] has the data */
122 return (void *)&event->array[1];
123}
124
125/**
126 * ring_buffer_event_data - return the data of the event
127 * @event: the event to get the data from
128 */
129void *ring_buffer_event_data(struct ring_buffer_event *event)
130{
131 return rb_event_data(event);
132}
133
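/*
 * Illustrative sketch: given an event obtained from the write side
 * (see ring_buffer_lock_reserve below) or from a read path, the two
 * accessors above hide the small/large length encoding:
 *
 *	void *body = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 *
 * Callers must not assume the payload sits at a fixed offset from the
 * event header; small payloads start at array[0], large ones at
 * array[1].
 */
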
134#define for_each_buffer_cpu(buffer, cpu) \
135 for_each_cpu_mask(cpu, buffer->cpumask)
136
137#define TS_SHIFT 27
138#define TS_MASK ((1ULL << TS_SHIFT) - 1)
139#define TS_DELTA_TEST (~TS_MASK)
140
141/*
142 * This hack stolen from mm/slob.c.
143 * We can store per page timing information in the page frame of the page.
144 * Thanks to Peter Zijlstra for suggesting this idea.
145 */
146struct buffer_page {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400147 u64 time_stamp; /* page time stamp */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400148 local_t write; /* index for next write */
149 local_t commit; /* write committed index */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400150 unsigned read; /* index for next read */
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400151 struct list_head list; /* list of free pages */
152 void *page; /* Actual data page */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400153};
154
155/*
Steven Rostedted568292008-09-29 23:02:40 -0400156 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
157 * this issue out.
158 */
159static inline void free_buffer_page(struct buffer_page *bpage)
160{
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400161 if (bpage->page)
Steven Rostedt6ae2a072008-10-13 10:22:06 -0400162 free_page((unsigned long)bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400163 kfree(bpage);
Steven Rostedted568292008-09-29 23:02:40 -0400164}
165
166/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400167 * We need to fit the time_stamp delta into 27 bits.
168 */
169static inline int test_time_stamp(u64 delta)
170{
171 if (delta & TS_DELTA_TEST)
172 return 1;
173 return 0;
174}
175
176#define BUF_PAGE_SIZE PAGE_SIZE
177
178/*
179 * head_page == tail_page && head == tail then buffer is empty.
180 */
181struct ring_buffer_per_cpu {
182 int cpu;
183 struct ring_buffer *buffer;
184 spinlock_t lock;
185 struct lock_class_key lock_key;
186 struct list_head pages;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400187 struct buffer_page *head_page; /* read from head */
188 struct buffer_page *tail_page; /* write to tail */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400189 struct buffer_page *commit_page; /* committed pages */
Steven Rostedtd7690412008-10-01 00:29:53 -0400190 struct buffer_page *reader_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400191 unsigned long overrun;
192 unsigned long entries;
193 u64 write_stamp;
194 u64 read_stamp;
195 atomic_t record_disabled;
196};
197
198struct ring_buffer {
199 unsigned long size;
200 unsigned pages;
201 unsigned flags;
202 int cpus;
203 cpumask_t cpumask;
204 atomic_t record_disabled;
205
206 struct mutex mutex;
207
208 struct ring_buffer_per_cpu **buffers;
209};
210
211struct ring_buffer_iter {
212 struct ring_buffer_per_cpu *cpu_buffer;
213 unsigned long head;
214 struct buffer_page *head_page;
215 u64 read_stamp;
216};
217
Steven Rostedtbf41a152008-10-04 02:00:59 -0400218#define RB_WARN_ON(buffer, cond) \
219 do { \
220 if (unlikely(cond)) { \
221 atomic_inc(&buffer->record_disabled); \
222 WARN_ON(1); \
223 } \
224 } while (0)
225
226#define RB_WARN_ON_RET(buffer, cond) \
227 do { \
228 if (unlikely(cond)) { \
229 atomic_inc(&buffer->record_disabled); \
230 WARN_ON(1); \
231 return -1; \
232 } \
233 } while (0)
234
235#define RB_WARN_ON_ONCE(buffer, cond) \
236 do { \
237 static int once; \
238 if (unlikely(cond) && !once) { \
239 once++; \
240 atomic_inc(&buffer->record_disabled); \
241 WARN_ON(1); \
242 } \
243 } while (0)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400244
245/**
246 * rb_check_pages - integrity check of buffer pages
247 * @cpu_buffer: CPU buffer with pages to test
248 *
249 * As a safety measure we check to make sure the data pages have not
250 * been corrupted.
251 */
252static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
253{
254 struct list_head *head = &cpu_buffer->pages;
255 struct buffer_page *page, *tmp;
256
Steven Rostedtbf41a152008-10-04 02:00:59 -0400257 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
258 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400259
260 list_for_each_entry_safe(page, tmp, head, list) {
Steven Rostedtbf41a152008-10-04 02:00:59 -0400261 RB_WARN_ON_RET(cpu_buffer,
262 page->list.next->prev != &page->list);
263 RB_WARN_ON_RET(cpu_buffer,
264 page->list.prev->next != &page->list);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400265 }
266
267 return 0;
268}
269
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400270static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
271 unsigned nr_pages)
272{
273 struct list_head *head = &cpu_buffer->pages;
274 struct buffer_page *page, *tmp;
275 unsigned long addr;
276 LIST_HEAD(pages);
277 unsigned i;
278
279 for (i = 0; i < nr_pages; i++) {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400280 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
Steven Rostedtaa1e0e32008-10-02 19:18:09 -0400281 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400282 if (!page)
283 goto free_pages;
284 list_add(&page->list, &pages);
285
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400286 addr = __get_free_page(GFP_KERNEL);
287 if (!addr)
288 goto free_pages;
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400289 page->page = (void *)addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400290 }
291
292 list_splice(&pages, head);
293
294 rb_check_pages(cpu_buffer);
295
296 return 0;
297
298 free_pages:
299 list_for_each_entry_safe(page, tmp, &pages, list) {
300 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400301 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400302 }
303 return -ENOMEM;
304}
305
306static struct ring_buffer_per_cpu *
307rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
308{
309 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400310 struct buffer_page *page;
Steven Rostedtd7690412008-10-01 00:29:53 -0400311 unsigned long addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400312 int ret;
313
314 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
315 GFP_KERNEL, cpu_to_node(cpu));
316 if (!cpu_buffer)
317 return NULL;
318
319 cpu_buffer->cpu = cpu;
320 cpu_buffer->buffer = buffer;
321 spin_lock_init(&cpu_buffer->lock);
322 INIT_LIST_HEAD(&cpu_buffer->pages);
323
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400324 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
325 GFP_KERNEL, cpu_to_node(cpu));
326 if (!page)
327 goto fail_free_buffer;
328
329 cpu_buffer->reader_page = page;
Steven Rostedtd7690412008-10-01 00:29:53 -0400330 addr = __get_free_page(GFP_KERNEL);
331 if (!addr)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400332 goto fail_free_reader;
333 page->page = (void *)addr;
334
Steven Rostedtd7690412008-10-01 00:29:53 -0400335 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
Steven Rostedtd7690412008-10-01 00:29:53 -0400336
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400337 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
338 if (ret < 0)
Steven Rostedtd7690412008-10-01 00:29:53 -0400339 goto fail_free_reader;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400340
341 cpu_buffer->head_page
342 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400343 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400344
345 return cpu_buffer;
346
Steven Rostedtd7690412008-10-01 00:29:53 -0400347 fail_free_reader:
348 free_buffer_page(cpu_buffer->reader_page);
349
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400350 fail_free_buffer:
351 kfree(cpu_buffer);
352 return NULL;
353}
354
355static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
356{
357 struct list_head *head = &cpu_buffer->pages;
358 struct buffer_page *page, *tmp;
359
Steven Rostedtd7690412008-10-01 00:29:53 -0400360 list_del_init(&cpu_buffer->reader_page->list);
361 free_buffer_page(cpu_buffer->reader_page);
362
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400363 list_for_each_entry_safe(page, tmp, head, list) {
364 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400365 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400366 }
367 kfree(cpu_buffer);
368}
369
Steven Rostedta7b13742008-09-29 23:02:39 -0400370/*
371 * Causes compile errors if the struct buffer_page gets bigger
372 * than the struct page.
373 */
374extern int ring_buffer_page_too_big(void);
375
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400376/**
377 * ring_buffer_alloc - allocate a new ring_buffer
378 * @size: the size in bytes that is needed.
379 * @flags: attributes to set for the ring buffer.
380 *
381 * Currently the only flag that is available is the RB_FL_OVERWRITE
382 * flag. This flag means that the buffer will overwrite old data
383 * when the buffer wraps. If this flag is not set, the buffer will
384 * drop data when the tail hits the head.
385 */
386struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
387{
388 struct ring_buffer *buffer;
389 int bsize;
390 int cpu;
391
Steven Rostedta7b13742008-09-29 23:02:39 -0400392 /* Paranoid! Optimizes out when all is well */
393 if (sizeof(struct buffer_page) > sizeof(struct page))
394 ring_buffer_page_too_big();
395
396
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400397 /* keep it in its own cache line */
398 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
399 GFP_KERNEL);
400 if (!buffer)
401 return NULL;
402
403 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
404 buffer->flags = flags;
405
406 /* need at least two pages */
407 if (buffer->pages == 1)
408 buffer->pages++;
409
410 buffer->cpumask = cpu_possible_map;
411 buffer->cpus = nr_cpu_ids;
412
413 bsize = sizeof(void *) * nr_cpu_ids;
414 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
415 GFP_KERNEL);
416 if (!buffer->buffers)
417 goto fail_free_buffer;
418
419 for_each_buffer_cpu(buffer, cpu) {
420 buffer->buffers[cpu] =
421 rb_allocate_cpu_buffer(buffer, cpu);
422 if (!buffer->buffers[cpu])
423 goto fail_free_buffers;
424 }
425
426 mutex_init(&buffer->mutex);
427
428 return buffer;
429
430 fail_free_buffers:
431 for_each_buffer_cpu(buffer, cpu) {
432 if (buffer->buffers[cpu])
433 rb_free_cpu_buffer(buffer->buffers[cpu]);
434 }
435 kfree(buffer->buffers);
436
437 fail_free_buffer:
438 kfree(buffer);
439 return NULL;
440}
441
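/*
 * Illustrative sketch: allocating and tearing down a buffer. The size
 * is rounded up to whole pages and at least two pages are allocated
 * for every possible CPU:
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	... produce and consume events ...
 *	ring_buffer_free(buffer);
 *
 * Without RB_FL_OVERWRITE the writer drops new data instead of
 * overwriting the oldest data once the buffer is full.
 */
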
442/**
443 * ring_buffer_free - free a ring buffer.
444 * @buffer: the buffer to free.
445 */
446void
447ring_buffer_free(struct ring_buffer *buffer)
448{
449 int cpu;
450
451 for_each_buffer_cpu(buffer, cpu)
452 rb_free_cpu_buffer(buffer->buffers[cpu]);
453
454 kfree(buffer);
455}
456
457static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
458
459static void
460rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
461{
462 struct buffer_page *page;
463 struct list_head *p;
464 unsigned i;
465
466 atomic_inc(&cpu_buffer->record_disabled);
467 synchronize_sched();
468
469 for (i = 0; i < nr_pages; i++) {
470 BUG_ON(list_empty(&cpu_buffer->pages));
471 p = cpu_buffer->pages.next;
472 page = list_entry(p, struct buffer_page, list);
473 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400474 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400475 }
476 BUG_ON(list_empty(&cpu_buffer->pages));
477
478 rb_reset_cpu(cpu_buffer);
479
480 rb_check_pages(cpu_buffer);
481
482 atomic_dec(&cpu_buffer->record_disabled);
483
484}
485
486static void
487rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
488 struct list_head *pages, unsigned nr_pages)
489{
490 struct buffer_page *page;
491 struct list_head *p;
492 unsigned i;
493
494 atomic_inc(&cpu_buffer->record_disabled);
495 synchronize_sched();
496
497 for (i = 0; i < nr_pages; i++) {
498 BUG_ON(list_empty(pages));
499 p = pages->next;
500 page = list_entry(p, struct buffer_page, list);
501 list_del_init(&page->list);
502 list_add_tail(&page->list, &cpu_buffer->pages);
503 }
504 rb_reset_cpu(cpu_buffer);
505
506 rb_check_pages(cpu_buffer);
507
508 atomic_dec(&cpu_buffer->record_disabled);
509}
510
511/**
512 * ring_buffer_resize - resize the ring buffer
513 * @buffer: the buffer to resize.
514 * @size: the new size.
515 *
516 * The tracer is responsible for making sure that the buffer is
517 * not being used while changing the size.
518 * Note: We may be able to change the above requirement by using
519 * RCU synchronizations.
520 *
521 * Minimum size is 2 * BUF_PAGE_SIZE.
522 *
523 * Returns -1 on failure.
524 */
525int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
526{
527 struct ring_buffer_per_cpu *cpu_buffer;
528 unsigned nr_pages, rm_pages, new_pages;
529 struct buffer_page *page, *tmp;
530 unsigned long buffer_size;
531 unsigned long addr;
532 LIST_HEAD(pages);
533 int i, cpu;
534
535 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
536 size *= BUF_PAGE_SIZE;
537 buffer_size = buffer->pages * BUF_PAGE_SIZE;
538
539 /* we need a minimum of two pages */
540 if (size < BUF_PAGE_SIZE * 2)
541 size = BUF_PAGE_SIZE * 2;
542
543 if (size == buffer_size)
544 return size;
545
546 mutex_lock(&buffer->mutex);
547
548 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
549
550 if (size < buffer_size) {
551
552 /* easy case, just free pages */
553 BUG_ON(nr_pages >= buffer->pages);
554
555 rm_pages = buffer->pages - nr_pages;
556
557 for_each_buffer_cpu(buffer, cpu) {
558 cpu_buffer = buffer->buffers[cpu];
559 rb_remove_pages(cpu_buffer, rm_pages);
560 }
561 goto out;
562 }
563
564 /*
565 * This is a bit more difficult. We only want to add pages
566 * when we can allocate enough for all CPUs. We do this
567 * by allocating all the pages and storing them on a local
568 * link list. If we succeed in our allocation, then we
569 * add these pages to the cpu_buffers. Otherwise we just free
570 * them all and return -ENOMEM;
571 */
572 BUG_ON(nr_pages <= buffer->pages);
573 new_pages = nr_pages - buffer->pages;
574
575 for_each_buffer_cpu(buffer, cpu) {
576 for (i = 0; i < new_pages; i++) {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400577 page = kzalloc_node(ALIGN(sizeof(*page),
578 cache_line_size()),
579 GFP_KERNEL, cpu_to_node(cpu));
580 if (!page)
581 goto free_pages;
582 list_add(&page->list, &pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400583 addr = __get_free_page(GFP_KERNEL);
584 if (!addr)
585 goto free_pages;
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400586 page->page = (void *)addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400587 }
588 }
589
590 for_each_buffer_cpu(buffer, cpu) {
591 cpu_buffer = buffer->buffers[cpu];
592 rb_insert_pages(cpu_buffer, &pages, new_pages);
593 }
594
595 BUG_ON(!list_empty(&pages));
596
597 out:
598 buffer->pages = nr_pages;
599 mutex_unlock(&buffer->mutex);
600
601 return size;
602
603 free_pages:
604 list_for_each_entry_safe(page, tmp, &pages, list) {
605 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400606 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400607 }
608 return -ENOMEM;
609}
610
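/*
 * Illustrative sketch: growing each per-cpu buffer to roughly 64KB.
 * The request is rounded to whole pages, clamped to the two-page
 * minimum, and the resulting size (or a negative error) is returned:
 *
 *	int ret = ring_buffer_resize(buffer, 64 * 1024);
 *
 *	if (ret < 0)
 *		return ret;
 *
 * As noted above, the caller must keep readers and writers away from
 * the buffer while the resize is in progress.
 */
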
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400611static inline int rb_null_event(struct ring_buffer_event *event)
612{
613 return event->type == RINGBUF_TYPE_PADDING;
614}
615
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400616static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400617{
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400618 return page->page + index;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400619}
620
621static inline struct ring_buffer_event *
Steven Rostedtd7690412008-10-01 00:29:53 -0400622rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400623{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400624 return __rb_page_index(cpu_buffer->reader_page,
625 cpu_buffer->reader_page->read);
626}
627
628static inline struct ring_buffer_event *
629rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
630{
631 return __rb_page_index(cpu_buffer->head_page,
632 cpu_buffer->head_page->read);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400633}
634
635static inline struct ring_buffer_event *
636rb_iter_head_event(struct ring_buffer_iter *iter)
637{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400638 return __rb_page_index(iter->head_page, iter->head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400639}
640
Steven Rostedtbf41a152008-10-04 02:00:59 -0400641static inline unsigned rb_page_write(struct buffer_page *bpage)
642{
643 return local_read(&bpage->write);
644}
645
646static inline unsigned rb_page_commit(struct buffer_page *bpage)
647{
648 return local_read(&bpage->commit);
649}
650
651/* Size is determined by what has been committed */
652static inline unsigned rb_page_size(struct buffer_page *bpage)
653{
654 return rb_page_commit(bpage);
655}
656
657static inline unsigned
658rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
659{
660 return rb_page_commit(cpu_buffer->commit_page);
661}
662
663static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
664{
665 return rb_page_commit(cpu_buffer->head_page);
666}
667
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400668/*
669 * When the tail hits the head and the buffer is in overwrite mode,
670 * the head jumps to the next page and all content on the previous
671 * page is discarded. But before doing so, we update the overrun
672 * variable of the buffer.
673 */
674static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
675{
676 struct ring_buffer_event *event;
677 unsigned long head;
678
679 for (head = 0; head < rb_head_size(cpu_buffer);
680 head += rb_event_length(event)) {
681
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400682 event = __rb_page_index(cpu_buffer->head_page, head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400683 BUG_ON(rb_null_event(event));
684 /* Only count data entries */
685 if (event->type != RINGBUF_TYPE_DATA)
686 continue;
687 cpu_buffer->overrun++;
688 cpu_buffer->entries--;
689 }
690}
691
692static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
693 struct buffer_page **page)
694{
695 struct list_head *p = (*page)->list.next;
696
697 if (p == &cpu_buffer->pages)
698 p = p->next;
699
700 *page = list_entry(p, struct buffer_page, list);
701}
702
Steven Rostedtbf41a152008-10-04 02:00:59 -0400703static inline unsigned
704rb_event_index(struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400705{
Steven Rostedtbf41a152008-10-04 02:00:59 -0400706 unsigned long addr = (unsigned long)event;
707
708 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400709}
710
Steven Rostedtbf41a152008-10-04 02:00:59 -0400711static inline int
712rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
713 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400714{
Steven Rostedtbf41a152008-10-04 02:00:59 -0400715 unsigned long addr = (unsigned long)event;
716 unsigned long index;
717
718 index = rb_event_index(event);
719 addr &= PAGE_MASK;
720
721 return cpu_buffer->commit_page->page == (void *)addr &&
722 rb_commit_index(cpu_buffer) == index;
723}
724
725static inline void
726rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
727 struct ring_buffer_event *event)
728{
729 unsigned long addr = (unsigned long)event;
730 unsigned long index;
731
732 index = rb_event_index(event);
733 addr &= PAGE_MASK;
734
735 while (cpu_buffer->commit_page->page != (void *)addr) {
736 RB_WARN_ON(cpu_buffer,
737 cpu_buffer->commit_page == cpu_buffer->tail_page);
738 cpu_buffer->commit_page->commit =
739 cpu_buffer->commit_page->write;
740 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
741 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
742 }
743
744 /* Now set the commit to the event's index */
745 local_set(&cpu_buffer->commit_page->commit, index);
746}
747
748static inline void
749rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
750{
751 /*
752 * We only race with interrupts and NMIs on this CPU.
753 * If we own the commit event, then we can commit
754 * all others that interrupted us, since the interruptions
755 * are in stack format (they finish before they come
756 * back to us). This allows us to do a simple loop to
757 * assign the commit to the tail.
758 */
759 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
760 cpu_buffer->commit_page->commit =
761 cpu_buffer->commit_page->write;
762 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
763 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
764 /* add barrier to keep gcc from optimizing too much */
765 barrier();
766 }
767 while (rb_commit_index(cpu_buffer) !=
768 rb_page_write(cpu_buffer->commit_page)) {
769 cpu_buffer->commit_page->commit =
770 cpu_buffer->commit_page->write;
771 barrier();
772 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400773}
774
Steven Rostedtd7690412008-10-01 00:29:53 -0400775static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400776{
Steven Rostedtd7690412008-10-01 00:29:53 -0400777 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400778 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -0400779}
780
781static inline void rb_inc_iter(struct ring_buffer_iter *iter)
782{
783 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
784
785 /*
786 * The iterator could be on the reader page (it starts there).
787 * But the head could have moved, since the reader was
788 * found. Check for this case and assign the iterator
789 * to the head page instead of next.
790 */
791 if (iter->head_page == cpu_buffer->reader_page)
792 iter->head_page = cpu_buffer->head_page;
793 else
794 rb_inc_page(cpu_buffer, &iter->head_page);
795
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400796 iter->read_stamp = iter->head_page->time_stamp;
797 iter->head = 0;
798}
799
800/**
801 * ring_buffer_update_event - update event type and data
802 * @event: the event to update
803 * @type: the type of event
804 * @length: the size of the event field in the ring buffer
805 *
806 * Update the type and data fields of the event. The length
807 * is the actual size that is written to the ring buffer,
808 * and with this, we can determine what to place into the
809 * data field.
810 */
811static inline void
812rb_update_event(struct ring_buffer_event *event,
813 unsigned type, unsigned length)
814{
815 event->type = type;
816
817 switch (type) {
818
819 case RINGBUF_TYPE_PADDING:
820 break;
821
822 case RINGBUF_TYPE_TIME_EXTEND:
823 event->len =
824 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
825 >> RB_ALIGNMENT_SHIFT;
826 break;
827
828 case RINGBUF_TYPE_TIME_STAMP:
829 event->len =
830 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
831 >> RB_ALIGNMENT_SHIFT;
832 break;
833
834 case RINGBUF_TYPE_DATA:
835 length -= RB_EVNT_HDR_SIZE;
836 if (length > RB_MAX_SMALL_DATA) {
837 event->len = 0;
838 event->array[0] = length;
839 } else
840 event->len =
841 (length + (RB_ALIGNMENT-1))
842 >> RB_ALIGNMENT_SHIFT;
843 break;
844 default:
845 BUG();
846 }
847}
848
849static inline unsigned rb_calculate_event_length(unsigned length)
850{
851 struct ring_buffer_event event; /* Used only for sizeof array */
852
853 /* zero length can cause confusion */
854 if (!length)
855 length = 1;
856
857 if (length > RB_MAX_SMALL_DATA)
858 length += sizeof(event.array[0]);
859
860 length += RB_EVNT_HDR_SIZE;
861 length = ALIGN(length, RB_ALIGNMENT);
862
863 return length;
864}
865
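/*
 * Worked example (illustrative; assumes the event header is one 32-bit
 * word, which is what RB_MAX_SMALL_DATA == 28 == 7 << RB_ALIGNMENT_SHIFT
 * implies): a request for 10 bytes of data reserves
 * ALIGN(10 + 4, 4) = 16 bytes in the page, and rb_update_event() later
 * encodes the payload size in event->len. A 40 byte request exceeds
 * RB_MAX_SMALL_DATA, so one extra word is added to hold the length in
 * array[0]: ALIGN(40 + 4 + 4, 4) = 48 bytes, with event->len set to 0.
 */
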
866static struct ring_buffer_event *
867__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
868 unsigned type, unsigned long length, u64 *ts)
869{
Steven Rostedtd7690412008-10-01 00:29:53 -0400870 struct buffer_page *tail_page, *head_page, *reader_page;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400871 unsigned long tail, write;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400872 struct ring_buffer *buffer = cpu_buffer->buffer;
873 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400874 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400875
876 tail_page = cpu_buffer->tail_page;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400877 write = local_add_return(length, &tail_page->write);
878 tail = write - length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400879
Steven Rostedtbf41a152008-10-04 02:00:59 -0400880 /* See if we shot past the end of this buffer page */
881 if (write > BUF_PAGE_SIZE) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400882 struct buffer_page *next_page = tail_page;
883
Steven Rostedtbf41a152008-10-04 02:00:59 -0400884 spin_lock_irqsave(&cpu_buffer->lock, flags);
885
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400886 rb_inc_page(cpu_buffer, &next_page);
887
Steven Rostedtd7690412008-10-01 00:29:53 -0400888 head_page = cpu_buffer->head_page;
889 reader_page = cpu_buffer->reader_page;
890
891 /* we grabbed the lock before incrementing */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400892 RB_WARN_ON(cpu_buffer, next_page == reader_page);
893
894 /*
895 * If for some reason, we had an interrupt storm that made
896 * it all the way around the buffer, bail, and warn
897 * about it.
898 */
899 if (unlikely(next_page == cpu_buffer->commit_page)) {
900 WARN_ON_ONCE(1);
901 goto out_unlock;
902 }
Steven Rostedtd7690412008-10-01 00:29:53 -0400903
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400904 if (next_page == head_page) {
Steven Rostedtd7690412008-10-01 00:29:53 -0400905 if (!(buffer->flags & RB_FL_OVERWRITE)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -0400906 /* reset write */
907 if (tail <= BUF_PAGE_SIZE)
908 local_set(&tail_page->write, tail);
909 goto out_unlock;
Steven Rostedtd7690412008-10-01 00:29:53 -0400910 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400911
Steven Rostedtbf41a152008-10-04 02:00:59 -0400912 /* tail_page has not moved yet? */
913 if (tail_page == cpu_buffer->tail_page) {
914 /* count overflows */
915 rb_update_overflow(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400916
Steven Rostedtbf41a152008-10-04 02:00:59 -0400917 rb_inc_page(cpu_buffer, &head_page);
918 cpu_buffer->head_page = head_page;
919 cpu_buffer->head_page->read = 0;
920 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400921 }
922
Steven Rostedtbf41a152008-10-04 02:00:59 -0400923 /*
924 * If the tail page is still the same as what we think
925 * it is, then it is up to us to update the tail
926 * pointer.
927 */
928 if (tail_page == cpu_buffer->tail_page) {
929 local_set(&next_page->write, 0);
930 local_set(&next_page->commit, 0);
931 cpu_buffer->tail_page = next_page;
932
933 /* reread the time stamp */
934 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
935 cpu_buffer->tail_page->time_stamp = *ts;
936 }
937
938 /*
939 * The actual tail page has moved forward.
940 */
941 if (tail < BUF_PAGE_SIZE) {
942 /* Mark the rest of the page with padding */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400943 event = __rb_page_index(tail_page, tail);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400944 event->type = RINGBUF_TYPE_PADDING;
945 }
946
Steven Rostedtbf41a152008-10-04 02:00:59 -0400947 if (tail <= BUF_PAGE_SIZE)
948 /* Set the write back to the previous setting */
949 local_set(&tail_page->write, tail);
950
951 /*
952 * If this was a commit entry that failed,
953 * increment that too
954 */
955 if (tail_page == cpu_buffer->commit_page &&
956 tail == rb_commit_index(cpu_buffer)) {
957 rb_set_commit_to_write(cpu_buffer);
958 }
959
960 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
961
962 /* fail and let the caller try again */
963 return ERR_PTR(-EAGAIN);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400964 }
965
Steven Rostedtbf41a152008-10-04 02:00:59 -0400966 /* We reserved something on the buffer */
967
968 BUG_ON(write > BUF_PAGE_SIZE);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400969
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400970 event = __rb_page_index(tail_page, tail);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400971 rb_update_event(event, type, length);
972
Steven Rostedtbf41a152008-10-04 02:00:59 -0400973 /*
974 * If this is a commit and the tail is zero, then update
975 * this page's time stamp.
976 */
977 if (!tail && rb_is_commit(cpu_buffer, event))
978 cpu_buffer->commit_page->time_stamp = *ts;
979
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400980 return event;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400981
982 out_unlock:
983 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
984 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400985}
986
987static int
988rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
989 u64 *ts, u64 *delta)
990{
991 struct ring_buffer_event *event;
992 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400993 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400994
995 if (unlikely(*delta > (1ULL << 59) && !once++)) {
996 printk(KERN_WARNING "Delta way too big! %llu"
997 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +1100998 (unsigned long long)*delta,
999 (unsigned long long)*ts,
1000 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001001 WARN_ON(1);
1002 }
1003
1004 /*
1005 * The delta is too big, we need to add a
1006 * new timestamp.
1007 */
1008 event = __rb_reserve_next(cpu_buffer,
1009 RINGBUF_TYPE_TIME_EXTEND,
1010 RB_LEN_TIME_EXTEND,
1011 ts);
1012 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001013 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001014
Steven Rostedtbf41a152008-10-04 02:00:59 -04001015 if (PTR_ERR(event) == -EAGAIN)
1016 return -EAGAIN;
1017
1018 /* Only a committed time event can update the write stamp */
1019 if (rb_is_commit(cpu_buffer, event)) {
1020 /*
1021 * If this is the first on the page, then we need to
1022 * update the page itself, and just put in a zero.
1023 */
1024 if (rb_event_index(event)) {
1025 event->time_delta = *delta & TS_MASK;
1026 event->array[0] = *delta >> TS_SHIFT;
1027 } else {
1028 cpu_buffer->commit_page->time_stamp = *ts;
1029 event->time_delta = 0;
1030 event->array[0] = 0;
1031 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001032 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001033 /* let the caller know this was the commit */
1034 ret = 1;
1035 } else {
1036 /* Darn, this is just wasted space */
1037 event->time_delta = 0;
1038 event->array[0] = 0;
1039 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001040 }
1041
Steven Rostedtbf41a152008-10-04 02:00:59 -04001042 *delta = 0;
1043
1044 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001045}
1046
1047static struct ring_buffer_event *
1048rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1049 unsigned type, unsigned long length)
1050{
1051 struct ring_buffer_event *event;
1052 u64 ts, delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001053 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001054 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001055
Steven Rostedtbf41a152008-10-04 02:00:59 -04001056 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001057 /*
1058 * We allow for interrupts to reenter here and do a trace.
1059 * If one does, it will cause this original code to loop
1060 * back here. Even with heavy interrupts happening, this
1061 * should only happen a few times in a row. If this happens
1062 * 1000 times in a row, there must be either an interrupt
1063 * storm or we have something buggy.
1064 * Bail!
1065 */
1066 if (unlikely(++nr_loops > 1000)) {
1067 RB_WARN_ON(cpu_buffer, 1);
1068 return NULL;
1069 }
1070
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001071 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1072
Steven Rostedtbf41a152008-10-04 02:00:59 -04001073 /*
1074 * Only the first commit can update the timestamp.
1075 * Yes there is a race here. If an interrupt comes in
1076 * just after the conditional and it traces too, then it
1077 * will also check the deltas. More than one timestamp may
1078 * also be made. But only the entry that did the actual
1079 * commit will be something other than zero.
1080 */
1081 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1082 rb_page_write(cpu_buffer->tail_page) ==
1083 rb_commit_index(cpu_buffer)) {
1084
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001085 delta = ts - cpu_buffer->write_stamp;
1086
Steven Rostedtbf41a152008-10-04 02:00:59 -04001087 /* make sure this delta is calculated here */
1088 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001089
Steven Rostedtbf41a152008-10-04 02:00:59 -04001090 /* Did the write stamp get updated already? */
1091 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt4143c5c2008-11-10 21:46:01 -05001092 delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001093
1094 if (test_time_stamp(delta)) {
1095
1096 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1097
1098 if (commit == -EBUSY)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001099 return NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001100
1101 if (commit == -EAGAIN)
1102 goto again;
1103
1104 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001105 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001106 } else
1107 /* Non commits have zero deltas */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001108 delta = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001109
1110 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001111 if (PTR_ERR(event) == -EAGAIN)
1112 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001113
Steven Rostedtbf41a152008-10-04 02:00:59 -04001114 if (!event) {
1115 if (unlikely(commit))
1116 /*
1117 * Ouch! We needed a timestamp and it was committed. But
1118 * we didn't get our event reserved.
1119 */
1120 rb_set_commit_to_write(cpu_buffer);
1121 return NULL;
1122 }
1123
1124 /*
1125 * If the timestamp was committed, make the commit our entry
1126 * now so that we will update it when needed.
1127 */
1128 if (commit)
1129 rb_set_commit_event(cpu_buffer, event);
1130 else if (!rb_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001131 delta = 0;
1132
1133 event->time_delta = delta;
1134
1135 return event;
1136}
1137
Steven Rostedtbf41a152008-10-04 02:00:59 -04001138static DEFINE_PER_CPU(int, rb_need_resched);
1139
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001140/**
1141 * ring_buffer_lock_reserve - reserve a part of the buffer
1142 * @buffer: the ring buffer to reserve from
1143 * @length: the length of the data to reserve (excluding event header)
1144 * @flags: a pointer to save the interrupt flags
1145 *
1146 * Returns a reserved event on the ring buffer to copy directly to.
1147 * The user of this interface will need to get the body to write into
1148 * and can use the ring_buffer_event_data() interface.
1149 *
1150 * The length is the length of the data needed, not the event length
1151 * which also includes the event header.
1152 *
1153 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1154 * If NULL is returned, then nothing has been allocated or locked.
1155 */
1156struct ring_buffer_event *
1157ring_buffer_lock_reserve(struct ring_buffer *buffer,
1158 unsigned long length,
1159 unsigned long *flags)
1160{
1161 struct ring_buffer_per_cpu *cpu_buffer;
1162 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001163 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001164
Steven Rostedta3583242008-11-11 15:01:42 -05001165 if (ring_buffers_off)
1166 return NULL;
1167
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001168 if (atomic_read(&buffer->record_disabled))
1169 return NULL;
1170
Steven Rostedtbf41a152008-10-04 02:00:59 -04001171 /* If we are tracing schedule, we don't want to recurse */
1172 resched = need_resched();
1173 preempt_disable_notrace();
1174
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001175 cpu = raw_smp_processor_id();
1176
1177 if (!cpu_isset(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001178 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001179
1180 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001181
1182 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001183 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001184
1185 length = rb_calculate_event_length(length);
1186 if (length > BUF_PAGE_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001187 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001188
1189 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1190 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001191 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001192
Steven Rostedtbf41a152008-10-04 02:00:59 -04001193 /*
1194 * Need to store resched state on this cpu.
1195 * Only the first needs to.
1196 */
1197
1198 if (preempt_count() == 1)
1199 per_cpu(rb_need_resched, cpu) = resched;
1200
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001201 return event;
1202
Steven Rostedtd7690412008-10-01 00:29:53 -04001203 out:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001204 if (resched)
1205 preempt_enable_no_resched_notrace();
1206 else
1207 preempt_enable_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001208 return NULL;
1209}
1210
1211static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1212 struct ring_buffer_event *event)
1213{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001214 cpu_buffer->entries++;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001215
1216 /* Only process further if we own the commit */
1217 if (!rb_is_commit(cpu_buffer, event))
1218 return;
1219
1220 cpu_buffer->write_stamp += event->time_delta;
1221
1222 rb_set_commit_to_write(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001223}
1224
1225/**
1226 * ring_buffer_unlock_commit - commit a reserved event
1227 * @buffer: The buffer to commit to
1228 * @event: The event pointer to commit.
1229 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1230 *
1231 * This commits the data to the ring buffer, and releases any locks held.
1232 *
1233 * Must be paired with ring_buffer_lock_reserve.
1234 */
1235int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1236 struct ring_buffer_event *event,
1237 unsigned long flags)
1238{
1239 struct ring_buffer_per_cpu *cpu_buffer;
1240 int cpu = raw_smp_processor_id();
1241
1242 cpu_buffer = buffer->buffers[cpu];
1243
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001244 rb_commit(cpu_buffer, event);
1245
Steven Rostedtbf41a152008-10-04 02:00:59 -04001246 /*
1247 * Only the last preempt count needs to restore preemption.
1248 */
1249 if (preempt_count() == 1) {
1250 if (per_cpu(rb_need_resched, cpu))
1251 preempt_enable_no_resched_notrace();
1252 else
1253 preempt_enable_notrace();
1254 } else
1255 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001256
1257 return 0;
1258}
1259
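/*
 * Illustrative sketch: the usual write-side pattern. The length passed
 * to ring_buffer_lock_reserve() is the payload size only; the event
 * header is added internally. "struct my_entry" is a hypothetical
 * payload type:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 *
 * If NULL was returned, nothing was reserved and no commit may follow.
 */
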
1260/**
1261 * ring_buffer_write - write data to the buffer without reserving
1262 * @buffer: The ring buffer to write to.
1263 * @length: The length of the data being written (excluding the event header)
1264 * @data: The data to write to the buffer.
1265 *
1266 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1267 * one function. If you already have the data to write to the buffer, it
1268 * may be easier to simply call this function.
1269 *
1270 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1271 * and not the length of the event which would hold the header.
1272 */
1273int ring_buffer_write(struct ring_buffer *buffer,
1274 unsigned long length,
1275 void *data)
1276{
1277 struct ring_buffer_per_cpu *cpu_buffer;
1278 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001279 unsigned long event_length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001280 void *body;
1281 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001282 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001283
Steven Rostedta3583242008-11-11 15:01:42 -05001284 if (ring_buffers_off)
1285 return -EBUSY;
1286
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001287 if (atomic_read(&buffer->record_disabled))
1288 return -EBUSY;
1289
Steven Rostedtbf41a152008-10-04 02:00:59 -04001290 resched = need_resched();
1291 preempt_disable_notrace();
1292
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001293 cpu = raw_smp_processor_id();
1294
1295 if (!cpu_isset(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001296 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001297
1298 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001299
1300 if (atomic_read(&cpu_buffer->record_disabled))
1301 goto out;
1302
1303 event_length = rb_calculate_event_length(length);
1304 event = rb_reserve_next_event(cpu_buffer,
1305 RINGBUF_TYPE_DATA, event_length);
1306 if (!event)
1307 goto out;
1308
1309 body = rb_event_data(event);
1310
1311 memcpy(body, data, length);
1312
1313 rb_commit(cpu_buffer, event);
1314
1315 ret = 0;
1316 out:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001317 if (resched)
1318 preempt_enable_no_resched_notrace();
1319 else
1320 preempt_enable_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001321
1322 return ret;
1323}
1324
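/*
 * Illustrative sketch: when the data already exists somewhere, a
 * single ring_buffer_write() call replaces the reserve/copy/commit
 * sequence. "struct my_entry" is a hypothetical payload type:
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return -EBUSY;
 */
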
Steven Rostedtbf41a152008-10-04 02:00:59 -04001325static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1326{
1327 struct buffer_page *reader = cpu_buffer->reader_page;
1328 struct buffer_page *head = cpu_buffer->head_page;
1329 struct buffer_page *commit = cpu_buffer->commit_page;
1330
1331 return reader->read == rb_page_commit(reader) &&
1332 (commit == reader ||
1333 (commit == head &&
1334 head->read == rb_page_commit(commit)));
1335}
1336
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001337/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001338 * ring_buffer_record_disable - stop all writes into the buffer
1339 * @buffer: The ring buffer to stop writes to.
1340 *
1341 * This prevents all writes to the buffer. Any attempt to write
1342 * to the buffer after this will fail and return NULL.
1343 *
1344 * The caller should call synchronize_sched() after this.
1345 */
1346void ring_buffer_record_disable(struct ring_buffer *buffer)
1347{
1348 atomic_inc(&buffer->record_disabled);
1349}
1350
1351/**
1352 * ring_buffer_record_enable - enable writes to the buffer
1353 * @buffer: The ring buffer to enable writes
1354 *
1355 * Note, multiple disables will need the same number of enables
1356 * to truly enable the writing (much like preempt_disable).
1357 */
1358void ring_buffer_record_enable(struct ring_buffer *buffer)
1359{
1360 atomic_dec(&buffer->record_disabled);
1361}
1362
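/*
 * Illustrative sketch: quiescing the writers before reading the buffer
 * from another context, as the comments above recommend:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	... read the buffer contents ...
 *	ring_buffer_record_enable(buffer);
 */
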
1363/**
1364 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1365 * @buffer: The ring buffer to stop writes to.
1366 * @cpu: The CPU buffer to stop
1367 *
1368 * This prevents all writes to the buffer. Any attempt to write
1369 * to the buffer after this will fail and return NULL.
1370 *
1371 * The caller should call synchronize_sched() after this.
1372 */
1373void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1374{
1375 struct ring_buffer_per_cpu *cpu_buffer;
1376
1377 if (!cpu_isset(cpu, buffer->cpumask))
1378 return;
1379
1380 cpu_buffer = buffer->buffers[cpu];
1381 atomic_inc(&cpu_buffer->record_disabled);
1382}
1383
1384/**
1385 * ring_buffer_record_enable_cpu - enable writes to the buffer
1386 * @buffer: The ring buffer to enable writes
1387 * @cpu: The CPU to enable.
1388 *
1389 * Note, multiple disables will need the same number of enables
1390 * to truly enable the writing (much like preempt_disable).
1391 */
1392void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1393{
1394 struct ring_buffer_per_cpu *cpu_buffer;
1395
1396 if (!cpu_isset(cpu, buffer->cpumask))
1397 return;
1398
1399 cpu_buffer = buffer->buffers[cpu];
1400 atomic_dec(&cpu_buffer->record_disabled);
1401}
1402
1403/**
1404 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1405 * @buffer: The ring buffer
1406 * @cpu: The per CPU buffer to get the entries from.
1407 */
1408unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1409{
1410 struct ring_buffer_per_cpu *cpu_buffer;
1411
1412 if (!cpu_isset(cpu, buffer->cpumask))
1413 return 0;
1414
1415 cpu_buffer = buffer->buffers[cpu];
1416 return cpu_buffer->entries;
1417}
1418
1419/**
1420 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1421 * @buffer: The ring buffer
1422 * @cpu: The per CPU buffer to get the number of overruns from
1423 */
1424unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1425{
1426 struct ring_buffer_per_cpu *cpu_buffer;
1427
1428 if (!cpu_isset(cpu, buffer->cpumask))
1429 return 0;
1430
1431 cpu_buffer = buffer->buffers[cpu];
1432 return cpu_buffer->overrun;
1433}
1434
1435/**
1436 * ring_buffer_entries - get the number of entries in a buffer
1437 * @buffer: The ring buffer
1438 *
1439 * Returns the total number of entries in the ring buffer
1440 * (all CPU entries)
1441 */
1442unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1443{
1444 struct ring_buffer_per_cpu *cpu_buffer;
1445 unsigned long entries = 0;
1446 int cpu;
1447
1448 /* if you care about this being correct, lock the buffer */
1449 for_each_buffer_cpu(buffer, cpu) {
1450 cpu_buffer = buffer->buffers[cpu];
1451 entries += cpu_buffer->entries;
1452 }
1453
1454 return entries;
1455}
1456
1457/**
1458 * ring_buffer_overruns - get the number of overruns in the buffer
1459 * @buffer: The ring buffer
1460 *
1461 * Returns the total number of overruns in the ring buffer
1462 * (all CPU entries)
1463 */
1464unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1465{
1466 struct ring_buffer_per_cpu *cpu_buffer;
1467 unsigned long overruns = 0;
1468 int cpu;
1469
1470 /* if you care about this being correct, lock the buffer */
1471 for_each_buffer_cpu(buffer, cpu) {
1472 cpu_buffer = buffer->buffers[cpu];
1473 overruns += cpu_buffer->overrun;
1474 }
1475
1476 return overruns;
1477}
1478
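/*
 * Illustrative sketch: the statistics above can be polled to detect
 * how many events were lost to overwrites when RB_FL_OVERWRITE is set:
 *
 *	unsigned long lost = ring_buffer_overruns(buffer);
 *
 *	if (lost)
 *		printk(KERN_INFO "ring buffer dropped %lu events\n", lost);
 */
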
1479/**
1480 * ring_buffer_iter_reset - reset an iterator
1481 * @iter: The iterator to reset
1482 *
1483 * Resets the iterator, so that it will start from the beginning
1484 * again.
1485 */
1486void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1487{
1488 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1489
Steven Rostedtd7690412008-10-01 00:29:53 -04001490 /* Iterator usage is expected to have record disabled */
1491 if (list_empty(&cpu_buffer->reader_page->list)) {
1492 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001493 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001494 } else {
1495 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001496 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001497 }
1498 if (iter->head)
1499 iter->read_stamp = cpu_buffer->read_stamp;
1500 else
1501 iter->read_stamp = iter->head_page->time_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001502}
1503
1504/**
1505 * ring_buffer_iter_empty - check if an iterator has no more to read
1506 * @iter: The iterator to check
1507 */
1508int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1509{
1510 struct ring_buffer_per_cpu *cpu_buffer;
1511
1512 cpu_buffer = iter->cpu_buffer;
1513
Steven Rostedtbf41a152008-10-04 02:00:59 -04001514 return iter->head_page == cpu_buffer->commit_page &&
1515 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001516}
1517
1518static void
1519rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1520 struct ring_buffer_event *event)
1521{
1522 u64 delta;
1523
1524 switch (event->type) {
1525 case RINGBUF_TYPE_PADDING:
1526 return;
1527
1528 case RINGBUF_TYPE_TIME_EXTEND:
1529 delta = event->array[0];
1530 delta <<= TS_SHIFT;
1531 delta += event->time_delta;
1532 cpu_buffer->read_stamp += delta;
1533 return;
1534
1535 case RINGBUF_TYPE_TIME_STAMP:
1536 /* FIXME: not implemented */
1537 return;
1538
1539 case RINGBUF_TYPE_DATA:
1540 cpu_buffer->read_stamp += event->time_delta;
1541 return;
1542
1543 default:
1544 BUG();
1545 }
1546 return;
1547}
1548
1549static void
1550rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1551 struct ring_buffer_event *event)
1552{
1553 u64 delta;
1554
1555 switch (event->type) {
1556 case RINGBUF_TYPE_PADDING:
1557 return;
1558
1559 case RINGBUF_TYPE_TIME_EXTEND:
1560 delta = event->array[0];
1561 delta <<= TS_SHIFT;
1562 delta += event->time_delta;
1563 iter->read_stamp += delta;
1564 return;
1565
1566 case RINGBUF_TYPE_TIME_STAMP:
1567 /* FIXME: not implemented */
1568 return;
1569
1570 case RINGBUF_TYPE_DATA:
1571 iter->read_stamp += event->time_delta;
1572 return;
1573
1574 default:
1575 BUG();
1576 }
1577 return;
1578}
1579
Steven Rostedtd7690412008-10-01 00:29:53 -04001580static struct buffer_page *
1581rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001582{
Steven Rostedtd7690412008-10-01 00:29:53 -04001583 struct buffer_page *reader = NULL;
1584 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001585 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001586
1587 spin_lock_irqsave(&cpu_buffer->lock, flags);
1588
1589 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001590 /*
1591 * This should normally only loop twice. But because the
1592 * start of the reader inserts an empty page, it causes
1593 * a case where we will loop three times. There should be no
1594 * reason to loop four times (that I know of).
1595 */
1596 if (unlikely(++nr_loops > 3)) {
1597 RB_WARN_ON(cpu_buffer, 1);
1598 reader = NULL;
1599 goto out;
1600 }
1601
Steven Rostedtd7690412008-10-01 00:29:53 -04001602 reader = cpu_buffer->reader_page;
1603
1604 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001605 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04001606 goto out;
1607
1608 /* Never should we have an index greater than the size */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001609 RB_WARN_ON(cpu_buffer,
1610 cpu_buffer->reader_page->read > rb_page_size(reader));
Steven Rostedtd7690412008-10-01 00:29:53 -04001611
1612 /* check if we caught up to the tail */
1613 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001614 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04001615 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001616
1617 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04001618 * Splice the empty reader page into the list around the head.
1619 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001620 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001621
Steven Rostedtd7690412008-10-01 00:29:53 -04001622 reader = cpu_buffer->head_page;
1623 cpu_buffer->reader_page->list.next = reader->list.next;
1624 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001625
1626 local_set(&cpu_buffer->reader_page->write, 0);
1627 local_set(&cpu_buffer->reader_page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04001628
1629 /* Make the reader page now replace the head */
1630 reader->list.prev->next = &cpu_buffer->reader_page->list;
1631 reader->list.next->prev = &cpu_buffer->reader_page->list;
1632
1633 /*
1634 * If the tail is on the reader, then we must set the head
1635 * to the inserted page, otherwise we set it one before.
1636 */
1637 cpu_buffer->head_page = cpu_buffer->reader_page;
1638
Steven Rostedtbf41a152008-10-04 02:00:59 -04001639 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04001640 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1641
1642 /* Finally update the reader page to the new head */
1643 cpu_buffer->reader_page = reader;
1644 rb_reset_reader_page(cpu_buffer);
1645
1646 goto again;
1647
1648 out:
1649 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1650
1651 return reader;
1652}
1653
1654static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1655{
1656 struct ring_buffer_event *event;
1657 struct buffer_page *reader;
1658 unsigned length;
1659
1660 reader = rb_get_reader_page(cpu_buffer);
1661
1662 /* This function should not be called when buffer is empty */
1663 BUG_ON(!reader);
1664
1665 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001666
1667 if (event->type == RINGBUF_TYPE_DATA)
1668 cpu_buffer->entries--;
1669
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001670 rb_update_read_stamp(cpu_buffer, event);
1671
Steven Rostedtd7690412008-10-01 00:29:53 -04001672 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001673 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001674}
1675
1676static void rb_advance_iter(struct ring_buffer_iter *iter)
1677{
1678 struct ring_buffer *buffer;
1679 struct ring_buffer_per_cpu *cpu_buffer;
1680 struct ring_buffer_event *event;
1681 unsigned length;
1682
1683 cpu_buffer = iter->cpu_buffer;
1684 buffer = cpu_buffer->buffer;
1685
1686 /*
1687 * Check if we are at the end of the buffer.
1688 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001689 if (iter->head >= rb_page_size(iter->head_page)) {
1690 BUG_ON(iter->head_page == cpu_buffer->commit_page);
Steven Rostedtd7690412008-10-01 00:29:53 -04001691 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001692 return;
1693 }
1694
1695 event = rb_iter_head_event(iter);
1696
1697 length = rb_event_length(event);
1698
1699 /*
1700	 * This should not be called to advance the iterator if we are
1701 * at the tail of the buffer.
1702 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001703 BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1704 (iter->head + length > rb_commit_index(cpu_buffer)));
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001705
1706 rb_update_iter_read_stamp(iter, event);
1707
1708 iter->head += length;
1709
1710 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001711 if ((iter->head >= rb_page_size(iter->head_page)) &&
1712 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001713 rb_advance_iter(iter);
1714}
1715
1716/**
1717 * ring_buffer_peek - peek at the next event to be read
1718 * @buffer: The ring buffer to read
1719 * @cpu: The cpu to peek at
1720 * @ts: The timestamp counter of this event.
1721 *
1722 * This will return the event that will be read next, but does
1723 * not consume the data.
1724 */
1725struct ring_buffer_event *
1726ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1727{
1728 struct ring_buffer_per_cpu *cpu_buffer;
1729 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04001730 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001731 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001732
1733 if (!cpu_isset(cpu, buffer->cpumask))
1734 return NULL;
1735
1736 cpu_buffer = buffer->buffers[cpu];
1737
1738 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001739 /*
1740 * We repeat when a timestamp is encountered. It is possible
1741 * to get multiple timestamps from an interrupt entering just
1742 * as one timestamp is about to be written. The max times
1743 * that this can happen is the number of nested interrupts we
1744 * can have. Nesting 10 deep of interrupts is clearly
1745 * an anomaly.
1746 */
1747 if (unlikely(++nr_loops > 10)) {
1748 RB_WARN_ON(cpu_buffer, 1);
1749 return NULL;
1750 }
1751
Steven Rostedtd7690412008-10-01 00:29:53 -04001752 reader = rb_get_reader_page(cpu_buffer);
1753 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001754 return NULL;
1755
Steven Rostedtd7690412008-10-01 00:29:53 -04001756 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001757
1758 switch (event->type) {
1759 case RINGBUF_TYPE_PADDING:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001760 RB_WARN_ON(cpu_buffer, 1);
Steven Rostedtd7690412008-10-01 00:29:53 -04001761 rb_advance_reader(cpu_buffer);
1762 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001763
1764 case RINGBUF_TYPE_TIME_EXTEND:
1765 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04001766 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001767 goto again;
1768
1769 case RINGBUF_TYPE_TIME_STAMP:
1770 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04001771 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001772 goto again;
1773
1774 case RINGBUF_TYPE_DATA:
1775 if (ts) {
1776 *ts = cpu_buffer->read_stamp + event->time_delta;
1777 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1778 }
1779 return event;
1780
1781 default:
1782 BUG();
1783 }
1784
1785 return NULL;
1786}
1787
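/*
 * Illustrative sketch (not part of the original file): how a reader
 * might use ring_buffer_peek() to find, without consuming anything,
 * the CPU whose next event carries the oldest timestamp.  The name
 * example_oldest_cpu() is hypothetical; only ring_buffer_peek() and
 * the for_each_buffer_cpu() macro are taken from this file.
 */
static int example_oldest_cpu(struct ring_buffer *buffer, u64 *ts_ret)
{
	struct ring_buffer_event *event;
	u64 ts, best_ts = 0;
	int cpu, best_cpu = -1;

	for_each_buffer_cpu(buffer, cpu) {
		event = ring_buffer_peek(buffer, cpu, &ts);
		if (!event)
			continue;
		if (best_cpu < 0 || ts < best_ts) {
			best_ts = ts;
			best_cpu = cpu;
		}
	}

	if (best_cpu >= 0 && ts_ret)
		*ts_ret = best_ts;

	return best_cpu;
}
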
1788/**
1789 * ring_buffer_iter_peek - peek at the next event to be read
1790 * @iter: The ring buffer iterator
1791 * @ts: The timestamp counter of this event.
1792 *
1793 * This will return the event that will be read next, but does
1794 * not increment the iterator.
1795 */
1796struct ring_buffer_event *
1797ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1798{
1799 struct ring_buffer *buffer;
1800 struct ring_buffer_per_cpu *cpu_buffer;
1801 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001802 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001803
1804 if (ring_buffer_iter_empty(iter))
1805 return NULL;
1806
1807 cpu_buffer = iter->cpu_buffer;
1808 buffer = cpu_buffer->buffer;
1809
1810 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001811 /*
1812 * We repeat when a timestamp is encountered. It is possible
1813 * to get multiple timestamps from an interrupt entering just
1814 * as one timestamp is about to be written. The max times
1815 * that this can happen is the number of nested interrupts we
1816 * can have. Nesting 10 deep of interrupts is clearly
1817 * an anomaly.
1818 */
1819 if (unlikely(++nr_loops > 10)) {
1820 RB_WARN_ON(cpu_buffer, 1);
1821 return NULL;
1822 }
1823
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001824 if (rb_per_cpu_empty(cpu_buffer))
1825 return NULL;
1826
1827 event = rb_iter_head_event(iter);
1828
1829 switch (event->type) {
1830 case RINGBUF_TYPE_PADDING:
Steven Rostedtd7690412008-10-01 00:29:53 -04001831 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001832 goto again;
1833
1834 case RINGBUF_TYPE_TIME_EXTEND:
1835 /* Internal data, OK to advance */
1836 rb_advance_iter(iter);
1837 goto again;
1838
1839 case RINGBUF_TYPE_TIME_STAMP:
1840 /* FIXME: not implemented */
1841 rb_advance_iter(iter);
1842 goto again;
1843
1844 case RINGBUF_TYPE_DATA:
1845 if (ts) {
1846 *ts = iter->read_stamp + event->time_delta;
1847 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1848 }
1849 return event;
1850
1851 default:
1852 BUG();
1853 }
1854
1855 return NULL;
1856}
1857
1858/**
1859 * ring_buffer_consume - return an event and consume it
1860 * @buffer: The ring buffer to get the next event from
 * @cpu: The cpu buffer to consume the next event from
 * @ts: The timestamp counter of this event.
1861 *
1862 * Returns the next event in the ring buffer, and that event is consumed.
1863 * This means that sequential reads will keep returning a different event,
1864 * and eventually empty the ring buffer if the producer is slower.
1865 */
1866struct ring_buffer_event *
1867ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1868{
1869 struct ring_buffer_per_cpu *cpu_buffer;
1870 struct ring_buffer_event *event;
1871
1872 if (!cpu_isset(cpu, buffer->cpumask))
1873 return NULL;
1874
1875 event = ring_buffer_peek(buffer, cpu, ts);
1876 if (!event)
1877 return NULL;
1878
1879 cpu_buffer = buffer->buffers[cpu];
Steven Rostedtd7690412008-10-01 00:29:53 -04001880 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001881
1882 return event;
1883}
1884
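/*
 * Illustrative sketch (not part of the original file): a consuming
 * read that drains whatever is currently queued on one CPU buffer.
 * The process_event() callback is a hypothetical consumer supplied by
 * the caller; ring_buffer_event_data() and ring_buffer_event_length()
 * are the accessors defined earlier in this file.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
			      void (*process_event)(void *data, unsigned len))
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		process_event(ring_buffer_event_data(event),
			      ring_buffer_event_length(event));
}
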
1885/**
1886 * ring_buffer_read_start - start a non consuming read of the buffer
1887 * @buffer: The ring buffer to read from
1888 * @cpu: The cpu buffer to iterate over
1889 *
1890 * This starts up an iteration through the buffer. It also disables
1891 * the recording to the buffer until the reading is finished.
1892 * This prevents the reading from being corrupted. This is not
1893 * a consuming read, so a producer is not expected.
1894 *
1895 * Must be paired with ring_buffer_read_finish.
1896 */
1897struct ring_buffer_iter *
1898ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1899{
1900 struct ring_buffer_per_cpu *cpu_buffer;
1901 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04001902 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001903
1904 if (!cpu_isset(cpu, buffer->cpumask))
1905 return NULL;
1906
1907 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1908 if (!iter)
1909 return NULL;
1910
1911 cpu_buffer = buffer->buffers[cpu];
1912
1913 iter->cpu_buffer = cpu_buffer;
1914
1915 atomic_inc(&cpu_buffer->record_disabled);
1916 synchronize_sched();
1917
Steven Rostedtd7690412008-10-01 00:29:53 -04001918 spin_lock_irqsave(&cpu_buffer->lock, flags);
1919 ring_buffer_iter_reset(iter);
1920 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001921
1922 return iter;
1923}
1924
1925/**
1926 * ring_buffer_read_finish - finish reading the iterator of the buffer
1927 * @iter: The iterator retrieved by ring_buffer_read_start
1928 *
1929 * This re-enables the recording to the buffer, and frees the
1930 * iterator.
1931 */
1932void
1933ring_buffer_read_finish(struct ring_buffer_iter *iter)
1934{
1935 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1936
1937 atomic_dec(&cpu_buffer->record_disabled);
1938 kfree(iter);
1939}
1940
1941/**
1942 * ring_buffer_read - read the next item in the ring buffer by the iterator
1943 * @iter: The ring buffer iterator
1944 * @ts: The time stamp of the event read.
1945 *
1946 * This reads the next event in the ring buffer and increments the iterator.
1947 */
1948struct ring_buffer_event *
1949ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1950{
1951 struct ring_buffer_event *event;
1952
1953 event = ring_buffer_iter_peek(iter, ts);
1954 if (!event)
1955 return NULL;
1956
1957 rb_advance_iter(iter);
1958
1959 return event;
1960}
1961
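/*
 * Illustrative sketch (not part of the original file): a non-consuming
 * walk over one CPU buffer.  ring_buffer_read_start() disables
 * recording on that CPU buffer for the duration of the walk, and
 * ring_buffer_read_finish() re-enables it and frees the iterator.
 * example_walk_cpu() is a hypothetical name.
 */
static void example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		printk(KERN_INFO "event at %llu, length %u\n",
		       (unsigned long long)ts,
		       ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}
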
1962/**
1963 * ring_buffer_size - return the size of the ring buffer (in bytes)
1964 * @buffer: The ring buffer.
1965 */
1966unsigned long ring_buffer_size(struct ring_buffer *buffer)
1967{
1968 return BUF_PAGE_SIZE * buffer->pages;
1969}
1970
1971static void
1972rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1973{
1974 cpu_buffer->head_page
1975 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001976 local_set(&cpu_buffer->head_page->write, 0);
1977 local_set(&cpu_buffer->head_page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001978
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001979 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001980
1981 cpu_buffer->tail_page = cpu_buffer->head_page;
1982 cpu_buffer->commit_page = cpu_buffer->head_page;
1983
1984 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1985 local_set(&cpu_buffer->reader_page->write, 0);
1986 local_set(&cpu_buffer->reader_page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001987 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001988
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001989 cpu_buffer->overrun = 0;
1990 cpu_buffer->entries = 0;
1991}
1992
1993/**
1994 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1995 * @buffer: The ring buffer to reset a per cpu buffer of
1996 * @cpu: The CPU buffer to be reset
1997 */
1998void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1999{
2000 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2001 unsigned long flags;
2002
2003 if (!cpu_isset(cpu, buffer->cpumask))
2004 return;
2005
Steven Rostedtd7690412008-10-01 00:29:53 -04002006 spin_lock_irqsave(&cpu_buffer->lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002007
2008 rb_reset_cpu(cpu_buffer);
2009
Steven Rostedtd7690412008-10-01 00:29:53 -04002010 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002011}
2012
2013/**
2014 * ring_buffer_reset - reset a ring buffer
2015 * @buffer: The ring buffer whose cpu buffers are to be reset
2016 */
2017void ring_buffer_reset(struct ring_buffer *buffer)
2018{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002019 int cpu;
2020
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002021 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002022 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002023}
2024
2025/**
2026 * ring_buffer_empty - is the ring buffer empty?
2027 * @buffer: The ring buffer to test
2028 */
2029int ring_buffer_empty(struct ring_buffer *buffer)
2030{
2031 struct ring_buffer_per_cpu *cpu_buffer;
2032 int cpu;
2033
2034 /* yes this is racy, but if you don't like the race, lock the buffer */
2035 for_each_buffer_cpu(buffer, cpu) {
2036 cpu_buffer = buffer->buffers[cpu];
2037 if (!rb_per_cpu_empty(cpu_buffer))
2038 return 0;
2039 }
2040 return 1;
2041}
2042
2043/**
2044 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2045 * @buffer: The ring buffer
2046 * @cpu: The CPU buffer to test
2047 */
2048int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2049{
2050 struct ring_buffer_per_cpu *cpu_buffer;
2051
2052 if (!cpu_isset(cpu, buffer->cpumask))
2053 return 1;
2054
2055 cpu_buffer = buffer->buffers[cpu];
2056 return rb_per_cpu_empty(cpu_buffer);
2057}
2058
2059/**
2060 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2061 * @buffer_a: One buffer to swap with
2062 * @buffer_b: The other buffer to swap with
 * @cpu: The cpu buffer to swap
2063 *
2064 * This function is useful for tracers that want to take a "snapshot"
2065 * of a CPU buffer and have another backup buffer lying around.
2066 * It is expected that the tracer handles the cpu buffer not being
2067 * used at the moment.
2068 */
2069int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2070 struct ring_buffer *buffer_b, int cpu)
2071{
2072 struct ring_buffer_per_cpu *cpu_buffer_a;
2073 struct ring_buffer_per_cpu *cpu_buffer_b;
2074
2075 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2076 !cpu_isset(cpu, buffer_b->cpumask))
2077 return -EINVAL;
2078
2079 /* At least make sure the two buffers are somewhat the same */
2080 if (buffer_a->size != buffer_b->size ||
2081 buffer_a->pages != buffer_b->pages)
2082 return -EINVAL;
2083
2084 cpu_buffer_a = buffer_a->buffers[cpu];
2085 cpu_buffer_b = buffer_b->buffers[cpu];
2086
2087 /*
2088 * We can't do a synchronize_sched here because this
2089 * function can be called in atomic context.
2090 * Normally this will be called from the same CPU as cpu.
2091 * If not it's up to the caller to protect this.
2092 */
2093 atomic_inc(&cpu_buffer_a->record_disabled);
2094 atomic_inc(&cpu_buffer_b->record_disabled);
2095
2096 buffer_a->buffers[cpu] = cpu_buffer_b;
2097 buffer_b->buffers[cpu] = cpu_buffer_a;
2098
2099 cpu_buffer_b->buffer = buffer_a;
2100 cpu_buffer_a->buffer = buffer_b;
2101
2102 atomic_dec(&cpu_buffer_a->record_disabled);
2103 atomic_dec(&cpu_buffer_b->record_disabled);
2104
2105 return 0;
2106}
2107
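/*
 * Illustrative sketch (not part of the original file): how a tracer
 * might take the "snapshot" described above by swapping the live
 * per-CPU buffer with a spare one and then draining the spare with
 * consuming reads.  example_snapshot_cpu() and both buffer arguments
 * are hypothetical; the spare buffer must have been allocated with
 * the same size and page count as the live one.
 */
static int example_snapshot_cpu(struct ring_buffer *live,
				struct ring_buffer *spare, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;

	/* The spare buffer now holds what this CPU had recorded so far */
	while ((event = ring_buffer_consume(spare, cpu, &ts)))
		printk(KERN_INFO "snapshot event at %llu\n",
		       (unsigned long long)ts);

	return 0;
}
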
Steven Rostedta3583242008-11-11 15:01:42 -05002108static ssize_t
2109rb_simple_read(struct file *filp, char __user *ubuf,
2110 size_t cnt, loff_t *ppos)
2111{
2112 int *p = filp->private_data;
2113 char buf[64];
2114 int r;
2115
2116 /* !ring_buffers_off == tracing_on */
2117 r = sprintf(buf, "%d\n", !*p);
2118
2119 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2120}
2121
2122static ssize_t
2123rb_simple_write(struct file *filp, const char __user *ubuf,
2124 size_t cnt, loff_t *ppos)
2125{
2126 int *p = filp->private_data;
2127 char buf[64];
2128 long val;
2129 int ret;
2130
2131 if (cnt >= sizeof(buf))
2132 return -EINVAL;
2133
2134 if (copy_from_user(&buf, ubuf, cnt))
2135 return -EFAULT;
2136
2137 buf[cnt] = 0;
2138
2139 ret = strict_strtoul(buf, 10, &val);
2140 if (ret < 0)
2141 return ret;
2142
2143 /* !ring_buffers_off == tracing_on */
2144 *p = !val;
2145
2146 (*ppos)++;
2147
2148 return cnt;
2149}
2150
2151static struct file_operations rb_simple_fops = {
2152 .open = tracing_open_generic,
2153 .read = rb_simple_read,
2154 .write = rb_simple_write,
2155};
2156
2157
2158static __init int rb_init_debugfs(void)
2159{
2160 struct dentry *d_tracer;
2161 struct dentry *entry;
2162
2163 d_tracer = tracing_init_dentry();
2164
2165 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2166 &ring_buffers_off, &rb_simple_fops);
2167 if (!entry)
2168 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2169
2170 return 0;
2171}
2172
2173fs_initcall(rb_init_debugfs);
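
/*
 * Illustrative note (not part of the original file): the "tracing_on"
 * file created above lives under the tracing debugfs directory
 * (typically <debugfs mount>/tracing/tracing_on).  Writing 1 clears
 * ring_buffers_off and allows recording; writing 0 sets it so that all
 * ring buffer writes fail, mirroring tracing_on()/tracing_off().
 */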