/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return sched_clock() << DEBUG_SHIFT;
}

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just for testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
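
/*
 * Worked example of the length encoding above. This is an illustrative
 * sketch, not part of the original file; it assumes the 4-byte event
 * header with the type:2/len:3/time_delta:27 bitfield layout from
 * <linux/ring_buffer.h>. A data event's "len" field counts RB_ALIGNMENT
 * (4 byte) units, so payloads up to RB_MAX_SMALL_DATA fit in the header
 * itself, while larger payloads put their byte count in array[0].
 */
#if 0
static void example_event_length(void)
{
	char buf[32] = {};	/* backing store for the flexible array */
	struct ring_buffer_event *ev = (struct ring_buffer_event *)buf;

	ev->type = RINGBUF_TYPE_DATA;

	/* Small payload: 12 bytes -> len = (12 + 3) >> 2 = 3 units */
	ev->len = (12 + (RB_ALIGNMENT - 1)) >> RB_ALIGNMENT_SHIFT;
	WARN_ON(rb_event_length(ev) != 12 + RB_EVNT_HDR_SIZE);

	/* Large payload: 100 bytes -> len = 0, byte count in array[0] */
	ev->len = 0;
	ev->array[0] = 100;
	WARN_ON(rb_event_length(ev) != 100 + RB_EVNT_HDR_SIZE);
}
#endif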

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	return rb_event_length(event);
}

/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu_mask(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/*
 * This hack stolen from mm/slob.c.
 * We can store per page timing information in the page frame of the page.
 * Thanks to Peter Zijlstra for suggesting this idea.
 */
struct buffer_page {
	u64 time_stamp;			/* page time stamp */
	unsigned size;			/* size of page data */
	struct list_head list;		/* list of free pages */
	void *page;			/* Actual data page */
};

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		__free_page(bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE PAGE_SIZE

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int cpu;
	struct ring_buffer *buffer;
	spinlock_t lock;
	struct lock_class_key lock_key;
	struct list_head pages;
	unsigned long head;	/* read from head */
	unsigned long tail;	/* write to tail */
	unsigned long reader;
	struct buffer_page *head_page;
	struct buffer_page *tail_page;
	struct buffer_page *reader_page;
	unsigned long overrun;
	unsigned long entries;
	u64 write_stamp;
	u64 read_stamp;
	atomic_t record_disabled;
};

struct ring_buffer {
	unsigned long size;
	unsigned pages;
	unsigned flags;
	int cpus;
	cpumask_t cpumask;
	atomic_t record_disabled;

	struct mutex mutex;

	struct ring_buffer_per_cpu **buffers;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long head;
	struct buffer_page *head_page;
	u64 read_stamp;
};

#define RB_WARN_ON(buffer, cond)			\
	if (unlikely(cond)) {				\
		atomic_inc(&buffer->record_disabled);	\
		WARN_ON(1);				\
		return -1;				\
	}

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	RB_WARN_ON(cpu_buffer, head->next->prev != head);
	RB_WARN_ON(cpu_buffer, head->prev->next != head);

	list_for_each_entry_safe(page, tmp, head, list) {
		RB_WARN_ON(cpu_buffer, page->list.next->prev != &page->list);
		RB_WARN_ON(cpu_buffer, page->list.prev->next != &page->list);
	}

	return 0;
}

static unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return cpu_buffer->head_page->size;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!page)
			goto free_pages;
		list_add(&page->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		page->page = (void *)addr;
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *page;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->lock);
	INIT_LIST_HEAD(&cpu_buffer->pages);

	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		goto fail_free_buffer;

	cpu_buffer->reader_page = page;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	page->page = (void *)addr;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	cpu_buffer->reader_page->size = 0;

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(page, tmp, head, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	buffer->cpumask = cpu_possible_map;
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_buffer;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
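
/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * allocate a 64KB overwriting buffer and free it again. The caller owns
 * the buffer and must not touch it after ring_buffer_free().
 */
#if 0
static int example_alloc(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	/* ... record and read events here ... */

	ring_buffer_free(buffer);
	return 0;
}
#endif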

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer);
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(&cpu_buffer->pages));
		p = cpu_buffer->pages.next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	BUG_ON(list_empty(&cpu_buffer->pages));

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(pages));
		p = pages->next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		list_add_tail(&page->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns the new size on success, or -ENOMEM on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *page, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		BUG_ON(nr_pages >= buffer->pages);

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM.
	 */
	BUG_ON(nr_pages <= buffer->pages);
	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			page = kzalloc_node(ALIGN(sizeof(*page),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!page)
				goto free_pages;
			list_add(&page->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			page->page = (void *)addr;
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	BUG_ON(!list_empty(&pages));

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	return (cpu_buffer->reader == cpu_buffer->reader_page->size &&
		(cpu_buffer->tail_page == cpu_buffer->reader_page ||
		 (cpu_buffer->tail_page == cpu_buffer->head_page &&
		  cpu_buffer->head == cpu_buffer->tail)));
}

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *rb_page_index(struct buffer_page *page, unsigned index)
{
	return page->page + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_index(cpu_buffer->reader_page,
			     cpu_buffer->reader);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return rb_page_index(iter->head_page,
			     iter->head);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = rb_page_index(cpu_buffer->head_page, head);
		BUG_ON(rb_null_event(event));
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **page)
{
	struct list_head *p = (*page)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*page = list_entry(p, struct buffer_page, list);
}

static inline void
rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
{
	cpu_buffer->tail_page->time_stamp = *ts;
	cpu_buffer->write_stamp = *ts;
}

static void rb_reset_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head = 0;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
	cpu_buffer->reader = 0;
}

static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}

static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
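
/*
 * Worked example of the sizing above (illustrative sketch, not part of
 * the original file; it assumes the 4-byte event header and 4-byte
 * array elements from <linux/ring_buffer.h>): a small payload only
 * pays for the header before aligning, a large one also reserves
 * array[0] to hold its byte count.
 */
#if 0
static void example_calculate_length(void)
{
	/* 10 bytes + 4 (header) = 14, aligned up to 16 */
	WARN_ON(rb_calculate_event_length(10) != 16);

	/* 100 bytes + 4 (array[0]) + 4 (header) = 108, already aligned */
	WARN_ON(rb_calculate_event_length(100) != 108);
}
#endif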

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page;
	unsigned long tail;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;

	/* No locking needed for tail page */
	tail_page = cpu_buffer->tail_page;
	tail = cpu_buffer->tail;

	if (tail + length > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		spin_lock(&cpu_buffer->lock);
		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		WARN_ON(next_page == reader_page);

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				spin_unlock(&cpu_buffer->lock);
				return NULL;
			}

			/* count overflows */
			rb_update_overflow(cpu_buffer);

			rb_inc_page(cpu_buffer, &head_page);
			cpu_buffer->head_page = head_page;
			rb_reset_head_page(cpu_buffer);
		}

		if (tail != BUF_PAGE_SIZE) {
			event = rb_page_index(tail_page, tail);
			/* page padding */
			event->type = RINGBUF_TYPE_PADDING;
		}

		tail_page->size = tail;
		tail_page = next_page;
		tail_page->size = 0;
		tail = 0;
		cpu_buffer->tail_page = tail_page;
		cpu_buffer->tail = tail;
		rb_add_stamp(cpu_buffer, ts);
		spin_unlock(&cpu_buffer->lock);
	}

	BUG_ON(tail + length > BUF_PAGE_SIZE);

	event = rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	return event;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       *delta, *ts, cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -1;

	/* check to see if we went to the next page */
	if (cpu_buffer->tail) {
		/* Still on same page, update timestamp */
		event->time_delta = *delta & TS_MASK;
		event->array[0] = *delta >> TS_SHIFT;
		/* commit the time event */
		cpu_buffer->tail +=
			rb_event_length(event);
		cpu_buffer->write_stamp = *ts;
		*delta = 0;
	}

	return 0;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	if (cpu_buffer->tail) {
		delta = ts - cpu_buffer->write_stamp;

		if (test_time_stamp(delta)) {
			int ret;

			ret = rb_add_time_stamp(cpu_buffer, &ts, &delta);
			if (ret < 0)
				return NULL;
		}
	} else {
		spin_lock(&cpu_buffer->lock);
		rb_add_stamp(cpu_buffer, &ts);
		spin_unlock(&cpu_buffer->lock);
		delta = 0;
	}

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (!event)
		return NULL;

	/* If the reserve went to the next page, our delta is zero */
	if (!cpu_buffer->tail)
		delta = 0;

	event->time_delta = delta;

	return event;
}

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	local_irq_save(*flags);
	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	return event;

 out:
	local_irq_restore(*flags);
	return NULL;
}
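
/*
 * Typical produce path (illustrative sketch, not part of the original
 * file; the payload struct and its values are made up): reserve space,
 * fill the body in place, then commit.
 */
#if 0
struct example_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

static int example_reserve_commit(struct ring_buffer *buffer)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;
	unsigned long flags;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = 0x1234;		/* example payload values */
	entry->parent_ip = 0x5678;

	return ring_buffer_unlock_commit(buffer, event, flags);
}
#endif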

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->tail += rb_event_length(event);
	cpu_buffer->tail_page->size = cpu_buffer->tail;
	cpu_buffer->write_stamp += event->time_delta;
	cpu_buffer->entries++;
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	local_irq_restore(flags);

	return 0;
}

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length, flags;
	void *body;
	int ret = -EBUSY;
	int cpu;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	local_irq_restore(flags);

	return ret;
}
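
/*
 * The one-shot variant (illustrative sketch, not part of the original
 * file): when the payload already exists, ring_buffer_write() copies
 * it in with a single call instead of reserve/commit.
 */
#if 0
static int example_write(struct ring_buffer *buffer)
{
	int value = 42;		/* example payload */

	return ring_buffer_write(buffer, sizeof(value), &value);
}
#endif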

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->entries;
}

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->overrun;
}

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += cpu_buffer->entries;
	}

	return entries;
}

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->time_stamp;
}

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->tail_page &&
		iter->head == cpu_buffer->tail;
}

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->lock, flags);

 again:
	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader < reader->size)
		goto out;

	/* Never should we have an index greater than the size */
	WARN_ON(cpu_buffer->reader > reader->size);

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;
	cpu_buffer->reader_page->size = 0;

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->tail_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);

	return reader;
}

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	BUG_ON(!reader);

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= iter->head_page->size) {
		BUG_ON(iter->head_page == cpu_buffer->tail_page);
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	BUG_ON((iter->head_page == cpu_buffer->tail_page) &&
	       (iter->head + length > cpu_buffer->tail));

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= iter->head_page->size) &&
	    (iter->head_page != cpu_buffer->tail_page))
		rb_advance_iter(iter);
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

 again:
	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		WARN_ON(1);
		rb_advance_reader(cpu_buffer);
		return NULL;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to read from
 * @ts: The timestamp counter of this event.
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	event = ring_buffer_peek(buffer, cpu, ts);
	if (!event)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];
	rb_advance_reader(cpu_buffer);

	return event;
}
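
/*
 * Draining a CPU buffer with the consuming read API (illustrative
 * sketch, not part of the original file; what to do with each payload
 * is up to the caller):
 */
#if 0
static void example_drain(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *body = ring_buffer_event_data(event);
		unsigned len = ring_buffer_event_length(event);

		/*
		 * "body" points at the payload, "ts" is its normalized
		 * time stamp; "len" is the full event length, header
		 * included.
		 */
		(void)body;
		(void)len;
	}
}
#endif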

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->lock, flags);
	ring_buffer_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);

	return iter;
}

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;

	event = ring_buffer_iter_peek(iter, ts);
	if (!event)
		return NULL;

	rb_advance_iter(iter);

	return event;
}
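
/*
 * Non-consuming read (illustrative sketch, not part of the original
 * file): iterate a CPU buffer without disturbing the reader. Recording
 * stays disabled for the lifetime of the iterator.
 */
#if 0
static void example_iterate(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts))) {
		/* inspect the event; the buffer itself is unchanged */
		;
	}

	ring_buffer_read_finish(iter);
}
#endif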

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->head_page->size = 0;
	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->tail_page->size = 0;
	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	cpu_buffer->reader_page->size = 0;

	cpu_buffer->head = cpu_buffer->tail = cpu_buffer->reader = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	spin_lock_irqsave(&cpu_buffer->lock, flags);

	rb_reset_cpu(cpu_buffer);

	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
}

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpu_isset(cpu, buffer_a->cpumask) ||
	    !cpu_isset(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->size != buffer_b->size ||
	    buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}
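
/*
 * Snapshot sketch (illustrative, not part of the original file): swap
 * the live per-CPU buffer with a spare one, then read the frozen copy
 * at leisure. "max_buffer" is a hypothetical second buffer the caller
 * allocated up front with the same size and page count.
 */
#if 0
static int example_snapshot(struct ring_buffer *live,
			    struct ring_buffer *max_buffer)
{
	int cpu = raw_smp_processor_id();
	int ret;

	ret = ring_buffer_swap_cpu(max_buffer, live, cpu);
	if (ret)
		return ret;

	/* "max_buffer" now holds this CPU's captured events */
	return 0;
}
#endif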