/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return sched_clock() << DEBUG_SHIFT;
}

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	return rb_event_length(event);
}

/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu_mask(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/*
 * This hack stolen from mm/slob.c.
 * We can store per page timing information in the page frame of the page.
 * Thanks to Peter Zijlstra for suggesting this idea.
 */
struct buffer_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 write;		/* index for next write */
	local_t		 commit;	/* write committed index */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	void		 *page;		/* Actual data page */
};

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE PAGE_SIZE

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned long			size;
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

#define RB_WARN_ON(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

#define RB_WARN_ON_RET(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
			return -1;				\
		}						\
	} while (0)

#define RB_WARN_ON_ONCE(buffer, cond)				\
	do {							\
		static int once;				\
		if (unlikely(cond) && !once) {			\
			once++;					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);

	list_for_each_entry_safe(page, tmp, head, list) {
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.next->prev != &page->list);
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.prev->next != &page->list);
	}

	return 0;
}

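/*
 * Allocate nr_pages buffer_page descriptors and their data pages for
 * one per-CPU buffer and splice them onto cpu_buffer->pages. On any
 * allocation failure the partially built list is freed again and
 * -ENOMEM is returned.
 */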
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!page)
			goto free_pages;
		list_add(&page->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		page->page = (void *)addr;
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

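/*
 * Set up a single per-CPU buffer: the descriptor itself, a spare
 * reader page, and the ring of data pages. head, tail and commit
 * all start out pointing at the first page of the list.
 */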
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *page;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		goto fail_free_buffer;

	cpu_buffer->reader_page = page;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	page->page = (void *)addr;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(page, tmp, head, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	buffer->cpumask = cpu_possible_map;
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_buffer;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer);
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(&cpu_buffer->pages));
		p = cpu_buffer->pages.next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	BUG_ON(list_empty(&cpu_buffer->pages));

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);

}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(pages));
		p = pages->next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		list_add_tail(&page->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *page, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		BUG_ON(nr_pages >= buffer->pages);

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	BUG_ON(nr_pages <= buffer->pages);
	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			page = kzalloc_node(ALIGN(sizeof(*page),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!page)
				goto free_pages;
			list_add(&page->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			page->page = (void *)addr;
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	BUG_ON(!list_empty(&pages));

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
{
	return page->page + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		BUG_ON(rb_null_event(event));
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **page)
{
	struct list_head *p = (*page)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*page = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static inline void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		RB_WARN_ON(cpu_buffer,
			   cpu_buffer->commit_page == cpu_buffer->tail_page);
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->commit, index);
}

static inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}

static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}

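/*
 * Reserve length bytes on the tail page. The write index is moved
 * forward with a lockless local_add_return(); only when the write
 * crosses a page boundary is cpu_buffer->lock taken to advance the
 * tail page (possibly pushing the head in overwrite mode). Returns
 * the reserved event, ERR_PTR(-EAGAIN) if the caller should retry
 * after the page switch, or NULL when the buffer is full.
 */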
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		__raw_spin_lock(&cpu_buffer->lock);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		RB_WARN_ON(cpu_buffer, next_page == reader_page);

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == cpu_buffer->commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	BUG_ON(write > BUF_PAGE_SIZE);

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->time_stamp = *ts;

	return event;

 out_unlock:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

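/*
 * The delta no longer fits in the 27 bit time_delta field, so insert
 * a TIME_EXTEND event carrying the full delta. Returns 1 if this
 * event turned out to be the commit (write stamp updated), 0 if it
 * was nested (space wasted), or -EBUSY/-EAGAIN if the reserve failed.
 */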
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}

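/*
 * Reserve the next event on this CPU buffer, computing its time delta
 * from the write stamp and inserting a TIME_EXTEND event first when
 * the delta does not fit. Loops on -EAGAIN until the reservation
 * either succeeds or the buffer is found to be full.
 */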
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;

 again:
	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			goto again;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed. But
			 * we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	ftrace_preempt_enable(resched);
	return NULL;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}

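/*
 * A per-CPU buffer is empty when the reader page has been fully
 * read and there is nothing committed beyond it.
 */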
static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->entries;
}

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->overrun;
}

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += cpu_buffer->entries;
	}

	return entries;
}

/**
 * ring_buffer_overruns - get the total number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->time_stamp;
}

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

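/*
 * Return the page the reader should read from next, or NULL if the
 * reader has caught up with the commit page. When the current reader
 * page is exhausted, the empty reader page is spliced in to replace
 * the head page under cpu_buffer->lock, and the old head becomes the
 * new reader page.
 */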
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	RB_WARN_ON(cpu_buffer,
		   cpu_buffer->reader_page->read > rb_page_size(reader));

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

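/*
 * Consume the event at the current read position: update the read
 * stamp, drop the entry count for data events, and move the reader
 * index past the event.
 */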
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	BUG_ON(!reader);

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

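/*
 * Move a non-consuming iterator past the event at its current head,
 * stepping to the next page when the head reaches the committed end
 * of the current one.
 */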
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		BUG_ON(iter->head_page == cpu_buffer->commit_page);
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
	       (iter->head + length > rb_commit_index(cpu_buffer)));

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

1649/**
1650 * ring_buffer_peek - peek at the next event to be read
1651 * @buffer: The ring buffer to read
1652 * @cpu: The cpu to peak at
1653 * @ts: The timestamp counter of this event.
1654 *
1655 * This will return the event that will be read next, but does
1656 * not consume the data.
1657 */
1658struct ring_buffer_event *
1659ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1660{
1661 struct ring_buffer_per_cpu *cpu_buffer;
1662 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04001663 struct buffer_page *reader;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001664
1665 if (!cpu_isset(cpu, buffer->cpumask))
1666 return NULL;
1667
1668 cpu_buffer = buffer->buffers[cpu];
1669
1670 again:
Steven Rostedtd7690412008-10-01 00:29:53 -04001671 reader = rb_get_reader_page(cpu_buffer);
1672 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001673 return NULL;
1674
Steven Rostedtd7690412008-10-01 00:29:53 -04001675 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001676
1677 switch (event->type) {
1678 case RINGBUF_TYPE_PADDING:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001679 RB_WARN_ON(cpu_buffer, 1);
Steven Rostedtd7690412008-10-01 00:29:53 -04001680 rb_advance_reader(cpu_buffer);
1681 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001682
1683 case RINGBUF_TYPE_TIME_EXTEND:
1684 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04001685 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001686 goto again;
1687
1688 case RINGBUF_TYPE_TIME_STAMP:
1689 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04001690 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001691 goto again;
1692
1693 case RINGBUF_TYPE_DATA:
1694 if (ts) {
1695 *ts = cpu_buffer->read_stamp + event->time_delta;
1696 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1697 }
1698 return event;
1699
1700 default:
1701 BUG();
1702 }
1703
1704 return NULL;
1705}
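
/*
 * Example: peeking without consuming. A reader can inspect the next
 * event on a CPU buffer (here, just its length) and decide later
 * whether to consume it. This is only an illustrative sketch, compiled
 * out with #if 0; rb_example_peek_len() is a hypothetical helper, not
 * part of the ring buffer API.
 */
#if 0
static unsigned rb_example_peek_len(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	/* NULL means the CPU buffer is currently empty */
	event = ring_buffer_peek(buffer, cpu, &ts);
	if (!event)
		return 0;

	/* the event is still in the buffer; nothing was consumed */
	return ring_buffer_event_length(event);
}
#endif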

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to read from
 * @ts: Where to store the event's timestamp (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	event = ring_buffer_peek(buffer, cpu, ts);
	if (!event)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];
	rb_advance_reader(cpu_buffer);

	return event;
}
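
/*
 * Example: a consuming read loop. Every event returned by
 * ring_buffer_consume() is removed from the buffer, so repeated calls
 * drain the CPU buffer even while a producer keeps writing. Illustrative
 * sketch only, compiled out with #if 0; rb_example_drain() is a
 * hypothetical helper.
 */
#if 0
static unsigned rb_example_drain(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned count = 0;
	u64 ts;

	/* ts is normalized by the peek done inside ring_buffer_consume() */
	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		count++;

	return count;
}
#endif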

/**
 * ring_buffer_read_start - start a non-consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);
	ring_buffer_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return iter;
}

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;

	event = ring_buffer_iter_peek(iter, ts);
	if (!event)
		return NULL;

	rb_advance_iter(iter);

	return event;
}
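
/*
 * Example: a non-consuming iteration. ring_buffer_read_start() disables
 * recording on the CPU buffer and must be paired with
 * ring_buffer_read_finish(); the events remain in the buffer afterwards.
 * Illustrative sketch only, compiled out with #if 0; rb_example_count()
 * is a hypothetical helper.
 */
#if 0
static unsigned rb_example_count(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	unsigned count = 0;
	u64 ts;

	/* NULL if @cpu is not in the buffer's mask or the allocation failed */
	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return 0;

	while ((event = ring_buffer_read(iter, &ts)))
		count++;

	/* re-enables recording and frees the iterator */
	ring_buffer_read_finish(iter);

	return count;
}
#endif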

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
}

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpu_isset(cpu, buffer_a->cpumask) ||
	    !cpu_isset(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->size != buffer_b->size ||
	    buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}
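
/*
 * Example: taking a per-CPU snapshot with the swap. A tracer that keeps
 * a spare ring buffer of the same size can swap one CPU's buffer out,
 * read the snapshot at leisure while the live buffer keeps recording,
 * and reset the spare before the next swap. Illustrative sketch only,
 * compiled out with #if 0; rb_example_snapshot() is a hypothetical
 * helper.
 */
#if 0
static unsigned rb_example_snapshot(struct ring_buffer *live,
				    struct ring_buffer *spare, int cpu)
{
	struct ring_buffer_event *event;
	unsigned count = 0;
	u64 ts;

	/* fails unless both buffers have the same size and page count */
	if (ring_buffer_swap_cpu(live, spare, cpu))
		return 0;

	/* @spare now holds @cpu's old buffer; @live got the spare's pages */
	while ((event = ring_buffer_consume(spare, cpu, &ts)))
		count++;

	/* make the spare empty and ready for the next swap */
	ring_buffer_reset_cpu(spare, cpu);

	return count;
}
#endif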