Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
Steven Rostedt182e9f52008-11-03 23:15:56 -050019#include "trace.h"
20
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040021/* Up this if you want to test the TIME_EXTENTS and normalization */
22#define DEBUG_SHIFT 0
23
24/* FIXME!!! */
25u64 ring_buffer_time_stamp(int cpu)
26{
27 /* shift to debug/test normalization and TIME_EXTENTS */
28 return sched_clock() << DEBUG_SHIFT;
29}
30
31void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
32{
33 /* Just stupid testing the normalize function and deltas */
34 *ts >>= DEBUG_SHIFT;
35}
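
/*
 * Usage sketch (not part of the compiled code): with DEBUG_SHIFT bumped to
 * a non-zero value such as 3, every stamp handed out above is the raw
 * sched_clock() value shifted left, which makes deltas overflow the 27-bit
 * field and exercises the TIME_EXTEND path; normalizing shifts it back.
 *
 *	u64 ts = ring_buffer_time_stamp(raw_smp_processor_id());
 *	ring_buffer_normalize_time_stamp(raw_smp_processor_id(), &ts);
 *	(ts now matches the unshifted sched_clock() value again)
 */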
36
37#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
38#define RB_ALIGNMENT_SHIFT 2
39#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
40#define RB_MAX_SMALL_DATA 28
41
42enum {
43 RB_LEN_TIME_EXTEND = 8,
44 RB_LEN_TIME_STAMP = 16,
45};
46
47/* inline for ring buffer fast paths */
48static inline unsigned
49rb_event_length(struct ring_buffer_event *event)
50{
51 unsigned length;
52
53 switch (event->type) {
54 case RINGBUF_TYPE_PADDING:
55 /* undefined */
56 return -1;
57
58 case RINGBUF_TYPE_TIME_EXTEND:
59 return RB_LEN_TIME_EXTEND;
60
61 case RINGBUF_TYPE_TIME_STAMP:
62 return RB_LEN_TIME_STAMP;
63
64 case RINGBUF_TYPE_DATA:
65 if (event->len)
66 length = event->len << RB_ALIGNMENT_SHIFT;
67 else
68 length = event->array[0];
69 return length + RB_EVNT_HDR_SIZE;
70 default:
71 BUG();
72 }
73 /* not hit */
74 return 0;
75}
76
77/**
78 * ring_buffer_event_length - return the length of the event
79 * @event: the event to get the length of
80 */
81unsigned ring_buffer_event_length(struct ring_buffer_event *event)
82{
83 return rb_event_length(event);
84}
85
86/* inline for ring buffer fast paths */
87static inline void *
88rb_event_data(struct ring_buffer_event *event)
89{
90 BUG_ON(event->type != RINGBUF_TYPE_DATA);
91 /* If length is in len field, then array[0] has the data */
92 if (event->len)
93 return (void *)&event->array[0];
94 /* Otherwise length is in array[0] and array[1] has the data */
95 return (void *)&event->array[1];
96}
97
98/**
99 * ring_buffer_event_data - return the data of the event
100 * @event: the event to get the data from
101 */
102void *ring_buffer_event_data(struct ring_buffer_event *event)
103{
104 return rb_event_data(event);
105}
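
/*
 * Reader-side sketch ("event" is a hypothetical pointer obtained from a
 * consuming interface, "struct my_entry" a hypothetical payload type):
 * the two helpers above hide the small/large layout, so callers never
 * have to look at event->len or event->array directly. For payloads over
 * RB_MAX_SMALL_DATA bytes the length lives in array[0] and the data
 * starts at array[1]; smaller payloads start at array[0].
 *
 *	struct my_entry *entry = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 */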
106
107#define for_each_buffer_cpu(buffer, cpu) \
108 for_each_cpu_mask(cpu, buffer->cpumask)
109
110#define TS_SHIFT 27
111#define TS_MASK ((1ULL << TS_SHIFT) - 1)
112#define TS_DELTA_TEST (~TS_MASK)
113
114/*
115 * This hack stolen from mm/slob.c.
116 * We can store per page timing information in the page frame of the page.
117 * Thanks to Peter Zijlstra for suggesting this idea.
118 */
119struct buffer_page {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400120 u64 time_stamp; /* page time stamp */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400121 local_t write; /* index for next write */
122 local_t commit; /* write committed index */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400123 unsigned read; /* index for next read */
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400124 struct list_head list; /* list of free pages */
125 void *page; /* Actual data page */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400126};
127
128/*
Steven Rostedted568292008-09-29 23:02:40 -0400129 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
130 * this issue out.
131 */
132static inline void free_buffer_page(struct buffer_page *bpage)
133{
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400134 if (bpage->page)
Steven Rostedt6ae2a072008-10-13 10:22:06 -0400135 free_page((unsigned long)bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400136 kfree(bpage);
Steven Rostedted568292008-09-29 23:02:40 -0400137}
138
139/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400140 * We need to fit the time_stamp delta into 27 bits.
141 */
142static inline int test_time_stamp(u64 delta)
143{
144 if (delta & TS_DELTA_TEST)
145 return 1;
146 return 0;
147}
148
149#define BUF_PAGE_SIZE PAGE_SIZE
150
151/*
152 * head_page == tail_page && head == tail then buffer is empty.
153 */
154struct ring_buffer_per_cpu {
155 int cpu;
156 struct ring_buffer *buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100157 spinlock_t reader_lock; /* serialize readers */
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500158 raw_spinlock_t lock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400159 struct lock_class_key lock_key;
160 struct list_head pages;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400161 struct buffer_page *head_page; /* read from head */
162 struct buffer_page *tail_page; /* write to tail */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400163 struct buffer_page *commit_page; /* committed pages */
Steven Rostedtd7690412008-10-01 00:29:53 -0400164 struct buffer_page *reader_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400165 unsigned long overrun;
166 unsigned long entries;
167 u64 write_stamp;
168 u64 read_stamp;
169 atomic_t record_disabled;
170};
171
172struct ring_buffer {
173 unsigned long size;
174 unsigned pages;
175 unsigned flags;
176 int cpus;
177 cpumask_t cpumask;
178 atomic_t record_disabled;
179
180 struct mutex mutex;
181
182 struct ring_buffer_per_cpu **buffers;
183};
184
185struct ring_buffer_iter {
186 struct ring_buffer_per_cpu *cpu_buffer;
187 unsigned long head;
188 struct buffer_page *head_page;
189 u64 read_stamp;
190};
191
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500192/* buffer may be either ring_buffer or ring_buffer_per_cpu */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400193#define RB_WARN_ON(buffer, cond) \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500194 ({ \
195 int _____ret = unlikely(cond); \
196 if (_____ret) { \
Steven Rostedtbf41a152008-10-04 02:00:59 -0400197 atomic_inc(&buffer->record_disabled); \
198 WARN_ON(1); \
199 } \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500200 _____ret; \
201 })
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500202
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400203/**
204 * rb_check_pages - integrity check of buffer pages
205 * @cpu_buffer: CPU buffer with pages to test
206 *
207 * As a safety measure we check to make sure the data pages have not
208 * been corrupted.
209 */
210static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
211{
212 struct list_head *head = &cpu_buffer->pages;
213 struct buffer_page *page, *tmp;
214
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500215 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
216 return -1;
217 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
218 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400219
220 list_for_each_entry_safe(page, tmp, head, list) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500221 if (RB_WARN_ON(cpu_buffer,
222 page->list.next->prev != &page->list))
223 return -1;
224 if (RB_WARN_ON(cpu_buffer,
225 page->list.prev->next != &page->list))
226 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400227 }
228
229 return 0;
230}
231
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400232static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
233 unsigned nr_pages)
234{
235 struct list_head *head = &cpu_buffer->pages;
236 struct buffer_page *page, *tmp;
237 unsigned long addr;
238 LIST_HEAD(pages);
239 unsigned i;
240
241 for (i = 0; i < nr_pages; i++) {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400242 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
Steven Rostedtaa1e0e32008-10-02 19:18:09 -0400243 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400244 if (!page)
245 goto free_pages;
246 list_add(&page->list, &pages);
247
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400248 addr = __get_free_page(GFP_KERNEL);
249 if (!addr)
250 goto free_pages;
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400251 page->page = (void *)addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400252 }
253
254 list_splice(&pages, head);
255
256 rb_check_pages(cpu_buffer);
257
258 return 0;
259
260 free_pages:
261 list_for_each_entry_safe(page, tmp, &pages, list) {
262 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400263 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400264 }
265 return -ENOMEM;
266}
267
268static struct ring_buffer_per_cpu *
269rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
270{
271 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400272 struct buffer_page *page;
Steven Rostedtd7690412008-10-01 00:29:53 -0400273 unsigned long addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400274 int ret;
275
276 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
277 GFP_KERNEL, cpu_to_node(cpu));
278 if (!cpu_buffer)
279 return NULL;
280
281 cpu_buffer->cpu = cpu;
282 cpu_buffer->buffer = buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100283 spin_lock_init(&cpu_buffer->reader_lock);
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500284 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400285 INIT_LIST_HEAD(&cpu_buffer->pages);
286
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400287 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
288 GFP_KERNEL, cpu_to_node(cpu));
289 if (!page)
290 goto fail_free_buffer;
291
292 cpu_buffer->reader_page = page;
Steven Rostedtd7690412008-10-01 00:29:53 -0400293 addr = __get_free_page(GFP_KERNEL);
294 if (!addr)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400295 goto fail_free_reader;
296 page->page = (void *)addr;
297
Steven Rostedtd7690412008-10-01 00:29:53 -0400298 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
Steven Rostedtd7690412008-10-01 00:29:53 -0400299
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400300 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
301 if (ret < 0)
Steven Rostedtd7690412008-10-01 00:29:53 -0400302 goto fail_free_reader;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400303
304 cpu_buffer->head_page
305 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400306 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400307
308 return cpu_buffer;
309
Steven Rostedtd7690412008-10-01 00:29:53 -0400310 fail_free_reader:
311 free_buffer_page(cpu_buffer->reader_page);
312
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400313 fail_free_buffer:
314 kfree(cpu_buffer);
315 return NULL;
316}
317
318static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
319{
320 struct list_head *head = &cpu_buffer->pages;
321 struct buffer_page *page, *tmp;
322
Steven Rostedtd7690412008-10-01 00:29:53 -0400323 list_del_init(&cpu_buffer->reader_page->list);
324 free_buffer_page(cpu_buffer->reader_page);
325
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400326 list_for_each_entry_safe(page, tmp, head, list) {
327 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400328 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400329 }
330 kfree(cpu_buffer);
331}
332
Steven Rostedta7b13742008-09-29 23:02:39 -0400333/*
334 * Causes compile errors if the struct buffer_page gets bigger
335 * than the struct page.
336 */
337extern int ring_buffer_page_too_big(void);
338
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400339/**
340 * ring_buffer_alloc - allocate a new ring_buffer
341 * @size: the size in bytes that is needed.
342 * @flags: attributes to set for the ring buffer.
343 *
344 * Currently the only flag that is available is the RB_FL_OVERWRITE
345 * flag. This flag means that the buffer will overwrite old data
346 * when the buffer wraps. If this flag is not set, the buffer will
347 * drop data when the tail hits the head.
348 */
349struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
350{
351 struct ring_buffer *buffer;
352 int bsize;
353 int cpu;
354
Steven Rostedta7b13742008-09-29 23:02:39 -0400355 /* Paranoid! Optimizes out when all is well */
356 if (sizeof(struct buffer_page) > sizeof(struct page))
357 ring_buffer_page_too_big();
358
359
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400360 /* keep it in its own cache line */
361 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
362 GFP_KERNEL);
363 if (!buffer)
364 return NULL;
365
366 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
367 buffer->flags = flags;
368
369 /* need at least two pages */
370 if (buffer->pages == 1)
371 buffer->pages++;
372
373 buffer->cpumask = cpu_possible_map;
374 buffer->cpus = nr_cpu_ids;
375
376 bsize = sizeof(void *) * nr_cpu_ids;
377 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
378 GFP_KERNEL);
379 if (!buffer->buffers)
380 goto fail_free_buffer;
381
382 for_each_buffer_cpu(buffer, cpu) {
383 buffer->buffers[cpu] =
384 rb_allocate_cpu_buffer(buffer, cpu);
385 if (!buffer->buffers[cpu])
386 goto fail_free_buffers;
387 }
388
389 mutex_init(&buffer->mutex);
390
391 return buffer;
392
393 fail_free_buffers:
394 for_each_buffer_cpu(buffer, cpu) {
395 if (buffer->buffers[cpu])
396 rb_free_cpu_buffer(buffer->buffers[cpu]);
397 }
398 kfree(buffer->buffers);
399
400 fail_free_buffer:
401 kfree(buffer);
402 return NULL;
403}
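
/*
 * Allocation sketch (error handling trimmed, "my_buffer" is a hypothetical
 * caller-side variable): the size is requested in bytes and rounded up to
 * whole BUF_PAGE_SIZE pages, with a minimum of two pages per CPU.
 *
 *	struct ring_buffer *my_buffer;
 *
 *	my_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
 *	if (!my_buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(my_buffer);
 */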
404
405/**
406 * ring_buffer_free - free a ring buffer.
407 * @buffer: the buffer to free.
408 */
409void
410ring_buffer_free(struct ring_buffer *buffer)
411{
412 int cpu;
413
414 for_each_buffer_cpu(buffer, cpu)
415 rb_free_cpu_buffer(buffer->buffers[cpu]);
416
417 kfree(buffer);
418}
419
420static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
421
422static void
423rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
424{
425 struct buffer_page *page;
426 struct list_head *p;
427 unsigned i;
428
429 atomic_inc(&cpu_buffer->record_disabled);
430 synchronize_sched();
431
432 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500433 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
434 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400435 p = cpu_buffer->pages.next;
436 page = list_entry(p, struct buffer_page, list);
437 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400438 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400439 }
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500440 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
441 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400442
443 rb_reset_cpu(cpu_buffer);
444
445 rb_check_pages(cpu_buffer);
446
447 atomic_dec(&cpu_buffer->record_disabled);
448
449}
450
451static void
452rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
453 struct list_head *pages, unsigned nr_pages)
454{
455 struct buffer_page *page;
456 struct list_head *p;
457 unsigned i;
458
459 atomic_inc(&cpu_buffer->record_disabled);
460 synchronize_sched();
461
462 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500463 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
464 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400465 p = pages->next;
466 page = list_entry(p, struct buffer_page, list);
467 list_del_init(&page->list);
468 list_add_tail(&page->list, &cpu_buffer->pages);
469 }
470 rb_reset_cpu(cpu_buffer);
471
472 rb_check_pages(cpu_buffer);
473
474 atomic_dec(&cpu_buffer->record_disabled);
475}
476
477/**
478 * ring_buffer_resize - resize the ring buffer
479 * @buffer: the buffer to resize.
480 * @size: the new size.
481 *
482 * The tracer is responsible for making sure that the buffer is
483 * not being used while changing the size.
484 * Note: We may be able to change the above requirement by using
485 * RCU synchronizations.
486 *
487 * Minimum size is 2 * BUF_PAGE_SIZE.
488 *
489 * Returns -1 on failure.
490 */
491int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
492{
493 struct ring_buffer_per_cpu *cpu_buffer;
494 unsigned nr_pages, rm_pages, new_pages;
495 struct buffer_page *page, *tmp;
496 unsigned long buffer_size;
497 unsigned long addr;
498 LIST_HEAD(pages);
499 int i, cpu;
500
501 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
502 size *= BUF_PAGE_SIZE;
503 buffer_size = buffer->pages * BUF_PAGE_SIZE;
504
505 /* we need a minimum of two pages */
506 if (size < BUF_PAGE_SIZE * 2)
507 size = BUF_PAGE_SIZE * 2;
508
509 if (size == buffer_size)
510 return size;
511
512 mutex_lock(&buffer->mutex);
513
514 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
515
516 if (size < buffer_size) {
517
518 /* easy case, just free pages */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500519 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
520 mutex_unlock(&buffer->mutex);
521 return -1;
522 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400523
524 rm_pages = buffer->pages - nr_pages;
525
526 for_each_buffer_cpu(buffer, cpu) {
527 cpu_buffer = buffer->buffers[cpu];
528 rb_remove_pages(cpu_buffer, rm_pages);
529 }
530 goto out;
531 }
532
533 /*
534 * This is a bit more difficult. We only want to add pages
535 * when we can allocate enough for all CPUs. We do this
536 * by allocating all the pages and storing them on a local
537 * linked list. If we succeed in our allocation, then we
538 * add these pages to the cpu_buffers. Otherwise we just free
539 * them all and return -ENOMEM;
540 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500541 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
542 mutex_unlock(&buffer->mutex);
543 return -1;
544 }
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500545
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400546 new_pages = nr_pages - buffer->pages;
547
548 for_each_buffer_cpu(buffer, cpu) {
549 for (i = 0; i < new_pages; i++) {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400550 page = kzalloc_node(ALIGN(sizeof(*page),
551 cache_line_size()),
552 GFP_KERNEL, cpu_to_node(cpu));
553 if (!page)
554 goto free_pages;
555 list_add(&page->list, &pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400556 addr = __get_free_page(GFP_KERNEL);
557 if (!addr)
558 goto free_pages;
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400559 page->page = (void *)addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400560 }
561 }
562
563 for_each_buffer_cpu(buffer, cpu) {
564 cpu_buffer = buffer->buffers[cpu];
565 rb_insert_pages(cpu_buffer, &pages, new_pages);
566 }
567
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500568 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
569 mutex_unlock(&buffer->mutex);
570 return -1;
571 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400572
573 out:
574 buffer->pages = nr_pages;
575 mutex_unlock(&buffer->mutex);
576
577 return size;
578
579 free_pages:
580 list_for_each_entry_safe(page, tmp, &pages, list) {
581 list_del_init(&page->list);
Steven Rostedted568292008-09-29 23:02:40 -0400582 free_buffer_page(page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400583 }
584 return -ENOMEM;
585}
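
/*
 * Resize sketch: the new size is also given in bytes and rounded up to
 * pages, the return value is the size actually in effect (or a negative
 * error), and the caller must make sure nothing is writing to the buffer
 * while the pages are swapped.
 *
 *	int ret = ring_buffer_resize(my_buffer, 128 * 1024);
 *	if (ret < 0)
 *		(allocation failed, the old page count is kept)
 */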
586
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400587static inline int rb_null_event(struct ring_buffer_event *event)
588{
589 return event->type == RINGBUF_TYPE_PADDING;
590}
591
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400592static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400593{
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400594 return page->page + index;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400595}
596
597static inline struct ring_buffer_event *
Steven Rostedtd7690412008-10-01 00:29:53 -0400598rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400599{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400600 return __rb_page_index(cpu_buffer->reader_page,
601 cpu_buffer->reader_page->read);
602}
603
604static inline struct ring_buffer_event *
605rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
606{
607 return __rb_page_index(cpu_buffer->head_page,
608 cpu_buffer->head_page->read);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400609}
610
611static inline struct ring_buffer_event *
612rb_iter_head_event(struct ring_buffer_iter *iter)
613{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400614 return __rb_page_index(iter->head_page, iter->head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400615}
616
Steven Rostedtbf41a152008-10-04 02:00:59 -0400617static inline unsigned rb_page_write(struct buffer_page *bpage)
618{
619 return local_read(&bpage->write);
620}
621
622static inline unsigned rb_page_commit(struct buffer_page *bpage)
623{
624 return local_read(&bpage->commit);
625}
626
627/* Size is determined by what has been commited */
628static inline unsigned rb_page_size(struct buffer_page *bpage)
629{
630 return rb_page_commit(bpage);
631}
632
633static inline unsigned
634rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
635{
636 return rb_page_commit(cpu_buffer->commit_page);
637}
638
639static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
640{
641 return rb_page_commit(cpu_buffer->head_page);
642}
643
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400644/*
645 * When the tail hits the head and the buffer is in overwrite mode,
646 * the head jumps to the next page and all content on the previous
647 * page is discarded. But before doing so, we update the overrun
648 * variable of the buffer.
649 */
650static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
651{
652 struct ring_buffer_event *event;
653 unsigned long head;
654
655 for (head = 0; head < rb_head_size(cpu_buffer);
656 head += rb_event_length(event)) {
657
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400658 event = __rb_page_index(cpu_buffer->head_page, head);
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500659 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
660 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400661 /* Only count data entries */
662 if (event->type != RINGBUF_TYPE_DATA)
663 continue;
664 cpu_buffer->overrun++;
665 cpu_buffer->entries--;
666 }
667}
668
669static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
670 struct buffer_page **page)
671{
672 struct list_head *p = (*page)->list.next;
673
674 if (p == &cpu_buffer->pages)
675 p = p->next;
676
677 *page = list_entry(p, struct buffer_page, list);
678}
679
Steven Rostedtbf41a152008-10-04 02:00:59 -0400680static inline unsigned
681rb_event_index(struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400682{
Steven Rostedtbf41a152008-10-04 02:00:59 -0400683 unsigned long addr = (unsigned long)event;
684
685 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400686}
687
Steven Rostedtbf41a152008-10-04 02:00:59 -0400688static inline int
689rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
690 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400691{
Steven Rostedtbf41a152008-10-04 02:00:59 -0400692 unsigned long addr = (unsigned long)event;
693 unsigned long index;
694
695 index = rb_event_index(event);
696 addr &= PAGE_MASK;
697
698 return cpu_buffer->commit_page->page == (void *)addr &&
699 rb_commit_index(cpu_buffer) == index;
700}
701
702static inline void
703rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
704 struct ring_buffer_event *event)
705{
706 unsigned long addr = (unsigned long)event;
707 unsigned long index;
708
709 index = rb_event_index(event);
710 addr &= PAGE_MASK;
711
712 while (cpu_buffer->commit_page->page != (void *)addr) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500713 if (RB_WARN_ON(cpu_buffer,
714 cpu_buffer->commit_page == cpu_buffer->tail_page))
715 return;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400716 cpu_buffer->commit_page->commit =
717 cpu_buffer->commit_page->write;
718 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
719 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
720 }
721
722 /* Now set the commit to the event's index */
723 local_set(&cpu_buffer->commit_page->commit, index);
724}
725
726static inline void
727rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
728{
729 /*
730 * We only race with interrupts and NMIs on this CPU.
731 * If we own the commit event, then we can commit
732 * all others that interrupted us, since the interruptions
733 * are in stack format (they finish before they come
734 * back to us). This allows us to do a simple loop to
735 * assign the commit to the tail.
736 */
737 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
738 cpu_buffer->commit_page->commit =
739 cpu_buffer->commit_page->write;
740 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
741 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
742 /* add barrier to keep gcc from optimizing too much */
743 barrier();
744 }
745 while (rb_commit_index(cpu_buffer) !=
746 rb_page_write(cpu_buffer->commit_page)) {
747 cpu_buffer->commit_page->commit =
748 cpu_buffer->commit_page->write;
749 barrier();
750 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400751}
752
Steven Rostedtd7690412008-10-01 00:29:53 -0400753static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400754{
Steven Rostedtd7690412008-10-01 00:29:53 -0400755 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400756 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -0400757}
758
759static inline void rb_inc_iter(struct ring_buffer_iter *iter)
760{
761 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
762
763 /*
764 * The iterator could be on the reader page (it starts there).
765 * But the head could have moved, since the reader was
766 * found. Check for this case and assign the iterator
767 * to the head page instead of next.
768 */
769 if (iter->head_page == cpu_buffer->reader_page)
770 iter->head_page = cpu_buffer->head_page;
771 else
772 rb_inc_page(cpu_buffer, &iter->head_page);
773
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400774 iter->read_stamp = iter->head_page->time_stamp;
775 iter->head = 0;
776}
777
778/**
779 * rb_update_event - update event type and data
780 * @event: the event to update
781 * @type: the type of event
782 * @length: the size of the event field in the ring buffer
783 *
784 * Update the type and data fields of the event. The length
785 * is the actual size that is written to the ring buffer,
786 * and with this, we can determine what to place into the
787 * data field.
788 */
789static inline void
790rb_update_event(struct ring_buffer_event *event,
791 unsigned type, unsigned length)
792{
793 event->type = type;
794
795 switch (type) {
796
797 case RINGBUF_TYPE_PADDING:
798 break;
799
800 case RINGBUF_TYPE_TIME_EXTEND:
801 event->len =
802 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
803 >> RB_ALIGNMENT_SHIFT;
804 break;
805
806 case RINGBUF_TYPE_TIME_STAMP:
807 event->len =
808 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
809 >> RB_ALIGNMENT_SHIFT;
810 break;
811
812 case RINGBUF_TYPE_DATA:
813 length -= RB_EVNT_HDR_SIZE;
814 if (length > RB_MAX_SMALL_DATA) {
815 event->len = 0;
816 event->array[0] = length;
817 } else
818 event->len =
819 (length + (RB_ALIGNMENT-1))
820 >> RB_ALIGNMENT_SHIFT;
821 break;
822 default:
823 BUG();
824 }
825}
826
827static inline unsigned rb_calculate_event_length(unsigned length)
828{
829 struct ring_buffer_event event; /* Used only for sizeof array */
830
831 /* zero length can cause confusions */
832 if (!length)
833 length = 1;
834
835 if (length > RB_MAX_SMALL_DATA)
836 length += sizeof(event.array[0]);
837
838 length += RB_EVNT_HDR_SIZE;
839 length = ALIGN(length, RB_ALIGNMENT);
840
841 return length;
842}
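
/*
 * Worked example of the sizing above, assuming the event header is a
 * single 32-bit word (type/len/time_delta): a 6 byte payload becomes
 * 4 + 6 = 10 bytes, aligned up to 12; a 100 byte payload is larger than
 * RB_MAX_SMALL_DATA, so 4 bytes are added for the length in array[0],
 * giving 4 + 4 + 100 = 108, already a multiple of RB_ALIGNMENT.
 */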
843
844static struct ring_buffer_event *
845__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
846 unsigned type, unsigned long length, u64 *ts)
847{
Steven Rostedtd7690412008-10-01 00:29:53 -0400848 struct buffer_page *tail_page, *head_page, *reader_page;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400849 unsigned long tail, write;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400850 struct ring_buffer *buffer = cpu_buffer->buffer;
851 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400852 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400853
854 tail_page = cpu_buffer->tail_page;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400855 write = local_add_return(length, &tail_page->write);
856 tail = write - length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400857
Steven Rostedtbf41a152008-10-04 02:00:59 -0400858 /* See if we shot past the end of this buffer page */
859 if (write > BUF_PAGE_SIZE) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400860 struct buffer_page *next_page = tail_page;
861
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500862 local_irq_save(flags);
863 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400864
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400865 rb_inc_page(cpu_buffer, &next_page);
866
Steven Rostedtd7690412008-10-01 00:29:53 -0400867 head_page = cpu_buffer->head_page;
868 reader_page = cpu_buffer->reader_page;
869
870 /* we grabbed the lock before incrementing */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500871 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
872 goto out_unlock;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400873
874 /*
875 * If for some reason, we had an interrupt storm that made
876 * it all the way around the buffer, bail, and warn
877 * about it.
878 */
879 if (unlikely(next_page == cpu_buffer->commit_page)) {
880 WARN_ON_ONCE(1);
881 goto out_unlock;
882 }
Steven Rostedtd7690412008-10-01 00:29:53 -0400883
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400884 if (next_page == head_page) {
Steven Rostedtd7690412008-10-01 00:29:53 -0400885 if (!(buffer->flags & RB_FL_OVERWRITE)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -0400886 /* reset write */
887 if (tail <= BUF_PAGE_SIZE)
888 local_set(&tail_page->write, tail);
889 goto out_unlock;
Steven Rostedtd7690412008-10-01 00:29:53 -0400890 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400891
Steven Rostedtbf41a152008-10-04 02:00:59 -0400892 /* tail_page has not moved yet? */
893 if (tail_page == cpu_buffer->tail_page) {
894 /* count overflows */
895 rb_update_overflow(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400896
Steven Rostedtbf41a152008-10-04 02:00:59 -0400897 rb_inc_page(cpu_buffer, &head_page);
898 cpu_buffer->head_page = head_page;
899 cpu_buffer->head_page->read = 0;
900 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400901 }
902
Steven Rostedtbf41a152008-10-04 02:00:59 -0400903 /*
904 * If the tail page is still the same as what we think
905 * it is, then it is up to us to update the tail
906 * pointer.
907 */
908 if (tail_page == cpu_buffer->tail_page) {
909 local_set(&next_page->write, 0);
910 local_set(&next_page->commit, 0);
911 cpu_buffer->tail_page = next_page;
912
913 /* reread the time stamp */
914 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
915 cpu_buffer->tail_page->time_stamp = *ts;
916 }
917
918 /*
919 * The actual tail page has moved forward.
920 */
921 if (tail < BUF_PAGE_SIZE) {
922 /* Mark the rest of the page with padding */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400923 event = __rb_page_index(tail_page, tail);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400924 event->type = RINGBUF_TYPE_PADDING;
925 }
926
Steven Rostedtbf41a152008-10-04 02:00:59 -0400927 if (tail <= BUF_PAGE_SIZE)
928 /* Set the write back to the previous setting */
929 local_set(&tail_page->write, tail);
930
931 /*
932 * If this was a commit entry that failed,
933 * increment that too
934 */
935 if (tail_page == cpu_buffer->commit_page &&
936 tail == rb_commit_index(cpu_buffer)) {
937 rb_set_commit_to_write(cpu_buffer);
938 }
939
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500940 __raw_spin_unlock(&cpu_buffer->lock);
941 local_irq_restore(flags);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400942
943 /* fail and let the caller try again */
944 return ERR_PTR(-EAGAIN);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400945 }
946
Steven Rostedtbf41a152008-10-04 02:00:59 -0400947 /* We reserved something on the buffer */
948
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500949 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
950 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400951
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400952 event = __rb_page_index(tail_page, tail);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400953 rb_update_event(event, type, length);
954
Steven Rostedtbf41a152008-10-04 02:00:59 -0400955 /*
956 * If this is a commit and the tail is zero, then update
957 * this page's time stamp.
958 */
959 if (!tail && rb_is_commit(cpu_buffer, event))
960 cpu_buffer->commit_page->time_stamp = *ts;
961
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400962 return event;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400963
964 out_unlock:
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500965 __raw_spin_unlock(&cpu_buffer->lock);
966 local_irq_restore(flags);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400967 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400968}
969
970static int
971rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
972 u64 *ts, u64 *delta)
973{
974 struct ring_buffer_event *event;
975 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400976 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400977
978 if (unlikely(*delta > (1ULL << 59) && !once++)) {
979 printk(KERN_WARNING "Delta way too big! %llu"
980 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +1100981 (unsigned long long)*delta,
982 (unsigned long long)*ts,
983 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400984 WARN_ON(1);
985 }
986
987 /*
988 * The delta is too big, we need to add a
989 * new timestamp.
990 */
991 event = __rb_reserve_next(cpu_buffer,
992 RINGBUF_TYPE_TIME_EXTEND,
993 RB_LEN_TIME_EXTEND,
994 ts);
995 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -0400996 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400997
Steven Rostedtbf41a152008-10-04 02:00:59 -0400998 if (PTR_ERR(event) == -EAGAIN)
999 return -EAGAIN;
1000
1001 /* Only a committed time event can update the write stamp */
1002 if (rb_is_commit(cpu_buffer, event)) {
1003 /*
1004 * If this is the first on the page, then we need to
1005 * update the page itself, and just put in a zero.
1006 */
1007 if (rb_event_index(event)) {
1008 event->time_delta = *delta & TS_MASK;
1009 event->array[0] = *delta >> TS_SHIFT;
1010 } else {
1011 cpu_buffer->commit_page->time_stamp = *ts;
1012 event->time_delta = 0;
1013 event->array[0] = 0;
1014 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001015 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001016 /* let the caller know this was the commit */
1017 ret = 1;
1018 } else {
1019 /* Darn, this is just wasted space */
1020 event->time_delta = 0;
1021 event->array[0] = 0;
1022 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001023 }
1024
Steven Rostedtbf41a152008-10-04 02:00:59 -04001025 *delta = 0;
1026
1027 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001028}
1029
1030static struct ring_buffer_event *
1031rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1032 unsigned type, unsigned long length)
1033{
1034 struct ring_buffer_event *event;
1035 u64 ts, delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001036 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001037 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001038
Steven Rostedtbf41a152008-10-04 02:00:59 -04001039 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001040 /*
1041 * We allow for interrupts to reenter here and do a trace.
1042 * If one does, it will cause this original code to loop
1043 * back here. Even with heavy interrupts happening, this
1044 * should only happen a few times in a row. If this happens
1045 * 1000 times in a row, there must be either an interrupt
1046 * storm or we have something buggy.
1047 * Bail!
1048 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001049 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001050 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001051
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001052 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1053
Steven Rostedtbf41a152008-10-04 02:00:59 -04001054 /*
1055 * Only the first commit can update the timestamp.
1056 * Yes there is a race here. If an interrupt comes in
1057 * just after the conditional and it traces too, then it
1058 * will also check the deltas. More than one timestamp may
1059 * also be made. But only the entry that did the actual
1060 * commit will be something other than zero.
1061 */
1062 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1063 rb_page_write(cpu_buffer->tail_page) ==
1064 rb_commit_index(cpu_buffer)) {
1065
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001066 delta = ts - cpu_buffer->write_stamp;
1067
Steven Rostedtbf41a152008-10-04 02:00:59 -04001068 /* make sure this delta is calculated here */
1069 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001070
Steven Rostedtbf41a152008-10-04 02:00:59 -04001071 /* Did the write stamp get updated already? */
1072 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt4143c5c2008-11-10 21:46:01 -05001073 delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001074
1075 if (test_time_stamp(delta)) {
1076
1077 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1078
1079 if (commit == -EBUSY)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001080 return NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001081
1082 if (commit == -EAGAIN)
1083 goto again;
1084
1085 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001086 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001087 } else
1088 /* Non commits have zero deltas */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001089 delta = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001090
1091 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001092 if (PTR_ERR(event) == -EAGAIN)
1093 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001094
Steven Rostedtbf41a152008-10-04 02:00:59 -04001095 if (!event) {
1096 if (unlikely(commit))
1097 /*
1098 * Ouch! We needed a timestamp and it was committed. But
1099 * we didn't get our event reserved.
1100 */
1101 rb_set_commit_to_write(cpu_buffer);
1102 return NULL;
1103 }
1104
1105 /*
1106 * If the timestamp was committed, make the commit our entry
1107 * now so that we will update it when needed.
1108 */
1109 if (commit)
1110 rb_set_commit_event(cpu_buffer, event);
1111 else if (!rb_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001112 delta = 0;
1113
1114 event->time_delta = delta;
1115
1116 return event;
1117}
1118
Steven Rostedtbf41a152008-10-04 02:00:59 -04001119static DEFINE_PER_CPU(int, rb_need_resched);
1120
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001121/**
1122 * ring_buffer_lock_reserve - reserve a part of the buffer
1123 * @buffer: the ring buffer to reserve from
1124 * @length: the length of the data to reserve (excluding event header)
1125 * @flags: a pointer to save the interrupt flags
1126 *
1127 * Returns a reserved event on the ring buffer to copy directly to.
1128 * The user of this interface will need to get the body to write into
1129 * and can use the ring_buffer_event_data() interface.
1130 *
1131 * The length is the length of the data needed, not the event length
1132 * which also includes the event header.
1133 *
1134 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1135 * If NULL is returned, then nothing has been allocated or locked.
1136 */
1137struct ring_buffer_event *
1138ring_buffer_lock_reserve(struct ring_buffer *buffer,
1139 unsigned long length,
1140 unsigned long *flags)
1141{
1142 struct ring_buffer_per_cpu *cpu_buffer;
1143 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001144 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001145
1146 if (atomic_read(&buffer->record_disabled))
1147 return NULL;
1148
Steven Rostedtbf41a152008-10-04 02:00:59 -04001149 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001150 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001151
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001152 cpu = raw_smp_processor_id();
1153
1154 if (!cpu_isset(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001155 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001156
1157 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001158
1159 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001160 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001161
1162 length = rb_calculate_event_length(length);
1163 if (length > BUF_PAGE_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001164 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001165
1166 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1167 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001168 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001169
Steven Rostedtbf41a152008-10-04 02:00:59 -04001170 /*
1171 * Need to store resched state on this cpu.
1172 * Only the first needs to.
1173 */
1174
1175 if (preempt_count() == 1)
1176 per_cpu(rb_need_resched, cpu) = resched;
1177
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001178 return event;
1179
Steven Rostedtd7690412008-10-01 00:29:53 -04001180 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001181 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001182 return NULL;
1183}
1184
1185static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1186 struct ring_buffer_event *event)
1187{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001188 cpu_buffer->entries++;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001189
1190 /* Only process further if we own the commit */
1191 if (!rb_is_commit(cpu_buffer, event))
1192 return;
1193
1194 cpu_buffer->write_stamp += event->time_delta;
1195
1196 rb_set_commit_to_write(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001197}
1198
1199/**
1200 * ring_buffer_unlock_commit - commit a reserved event
1201 * @buffer: The buffer to commit to
1202 * @event: The event pointer to commit.
1203 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1204 *
1205 * This commits the data to the ring buffer, and releases any locks held.
1206 *
1207 * Must be paired with ring_buffer_lock_reserve.
1208 */
1209int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1210 struct ring_buffer_event *event,
1211 unsigned long flags)
1212{
1213 struct ring_buffer_per_cpu *cpu_buffer;
1214 int cpu = raw_smp_processor_id();
1215
1216 cpu_buffer = buffer->buffers[cpu];
1217
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001218 rb_commit(cpu_buffer, event);
1219
Steven Rostedtbf41a152008-10-04 02:00:59 -04001220 /*
1221 * Only the last preempt count needs to restore preemption.
1222 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001223 if (preempt_count() == 1)
1224 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1225 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001226 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001227
1228 return 0;
1229}
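
/*
 * Producer-side sketch of the reserve/commit pair ("my_buffer" and
 * "struct my_entry" are hypothetical): the length passed to the reserve
 * is the payload size only, the event header is added internally, and
 * the flags returned by the reserve must be handed back to the commit.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(my_buffer, event, flags);
 */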
1230
1231/**
1232 * ring_buffer_write - write data to the buffer without reserving
1233 * @buffer: The ring buffer to write to.
1234 * @length: The length of the data being written (excluding the event header)
1235 * @data: The data to write to the buffer.
1236 *
1237 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1238 * one function. If you already have the data to write to the buffer, it
1239 * may be easier to simply call this function.
1240 *
1241 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1242 * and not the length of the event which would hold the header.
1243 */
1244int ring_buffer_write(struct ring_buffer *buffer,
1245 unsigned long length,
1246 void *data)
1247{
1248 struct ring_buffer_per_cpu *cpu_buffer;
1249 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001250 unsigned long event_length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001251 void *body;
1252 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001253 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001254
1255 if (atomic_read(&buffer->record_disabled))
1256 return -EBUSY;
1257
Steven Rostedt182e9f52008-11-03 23:15:56 -05001258 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001259
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001260 cpu = raw_smp_processor_id();
1261
1262 if (!cpu_isset(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001263 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001264
1265 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001266
1267 if (atomic_read(&cpu_buffer->record_disabled))
1268 goto out;
1269
1270 event_length = rb_calculate_event_length(length);
1271 event = rb_reserve_next_event(cpu_buffer,
1272 RINGBUF_TYPE_DATA, event_length);
1273 if (!event)
1274 goto out;
1275
1276 body = rb_event_data(event);
1277
1278 memcpy(body, data, length);
1279
1280 rb_commit(cpu_buffer, event);
1281
1282 ret = 0;
1283 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001284 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001285
1286 return ret;
1287}
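
/*
 * One-shot write sketch (same hypothetical "my_buffer"/"struct my_entry"
 * as above): when the data already exists, this collapses the reserve,
 * memcpy and commit into a single call; the return value is 0 on success
 * and -EBUSY when recording is disabled or no space could be reserved.
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(my_buffer, sizeof(entry), &entry))
 *		(the event was dropped)
 */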
1288
Steven Rostedtbf41a152008-10-04 02:00:59 -04001289static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1290{
1291 struct buffer_page *reader = cpu_buffer->reader_page;
1292 struct buffer_page *head = cpu_buffer->head_page;
1293 struct buffer_page *commit = cpu_buffer->commit_page;
1294
1295 return reader->read == rb_page_commit(reader) &&
1296 (commit == reader ||
1297 (commit == head &&
1298 head->read == rb_page_commit(commit)));
1299}
1300
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001301/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001302 * ring_buffer_record_disable - stop all writes into the buffer
1303 * @buffer: The ring buffer to stop writes to.
1304 *
1305 * This prevents all writes to the buffer. Any attempt to write
1306 * to the buffer after this will fail and return NULL.
1307 *
1308 * The caller should call synchronize_sched() after this.
1309 */
1310void ring_buffer_record_disable(struct ring_buffer *buffer)
1311{
1312 atomic_inc(&buffer->record_disabled);
1313}
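
/*
 * Sketch of the disable-before-read pattern the comment above asks for:
 * bump the disable count, wait for writers already inside the reserve
 * path to drain, do the read (or reset), then re-enable recording.
 *
 *	ring_buffer_record_disable(my_buffer);
 *	synchronize_sched();
 *	(read or reset the buffer here)
 *	ring_buffer_record_enable(my_buffer);
 */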
1314
1315/**
1316 * ring_buffer_record_enable - enable writes to the buffer
1317 * @buffer: The ring buffer to enable writes
1318 *
1319 * Note, multiple disables will need the same number of enables
1320 * to truly enable the writing (much like preempt_disable).
1321 */
1322void ring_buffer_record_enable(struct ring_buffer *buffer)
1323{
1324 atomic_dec(&buffer->record_disabled);
1325}
1326
1327/**
1328 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1329 * @buffer: The ring buffer to stop writes to.
1330 * @cpu: The CPU buffer to stop
1331 *
1332 * This prevents all writes to the buffer. Any attempt to write
1333 * to the buffer after this will fail and return NULL.
1334 *
1335 * The caller should call synchronize_sched() after this.
1336 */
1337void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1338{
1339 struct ring_buffer_per_cpu *cpu_buffer;
1340
1341 if (!cpu_isset(cpu, buffer->cpumask))
1342 return;
1343
1344 cpu_buffer = buffer->buffers[cpu];
1345 atomic_inc(&cpu_buffer->record_disabled);
1346}
1347
1348/**
1349 * ring_buffer_record_enable_cpu - enable writes to the buffer
1350 * @buffer: The ring buffer to enable writes
1351 * @cpu: The CPU to enable.
1352 *
1353 * Note, multiple disables will need the same number of enables
1354 * to truly enable the writing (much like preempt_disable).
1355 */
1356void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1357{
1358 struct ring_buffer_per_cpu *cpu_buffer;
1359
1360 if (!cpu_isset(cpu, buffer->cpumask))
1361 return;
1362
1363 cpu_buffer = buffer->buffers[cpu];
1364 atomic_dec(&cpu_buffer->record_disabled);
1365}
1366
1367/**
1368 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1369 * @buffer: The ring buffer
1370 * @cpu: The per CPU buffer to get the entries from.
1371 */
1372unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1373{
1374 struct ring_buffer_per_cpu *cpu_buffer;
1375
1376 if (!cpu_isset(cpu, buffer->cpumask))
1377 return 0;
1378
1379 cpu_buffer = buffer->buffers[cpu];
1380 return cpu_buffer->entries;
1381}
1382
1383/**
1384 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1385 * @buffer: The ring buffer
1386 * @cpu: The per CPU buffer to get the number of overruns from
1387 */
1388unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1389{
1390 struct ring_buffer_per_cpu *cpu_buffer;
1391
1392 if (!cpu_isset(cpu, buffer->cpumask))
1393 return 0;
1394
1395 cpu_buffer = buffer->buffers[cpu];
1396 return cpu_buffer->overrun;
1397}
1398
1399/**
1400 * ring_buffer_entries - get the number of entries in a buffer
1401 * @buffer: The ring buffer
1402 *
1403 * Returns the total number of entries in the ring buffer
1404 * (all CPU entries)
1405 */
1406unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1407{
1408 struct ring_buffer_per_cpu *cpu_buffer;
1409 unsigned long entries = 0;
1410 int cpu;
1411
1412 /* if you care about this being correct, lock the buffer */
1413 for_each_buffer_cpu(buffer, cpu) {
1414 cpu_buffer = buffer->buffers[cpu];
1415 entries += cpu_buffer->entries;
1416 }
1417
1418 return entries;
1419}
1420
1421/**
1422 * ring_buffer_overruns - get the number of overruns in the buffer
1423 * @buffer: The ring buffer
1424 *
1425 * Returns the total number of overruns in the ring buffer
1426 * (all CPU entries)
1427 */
1428unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1429{
1430 struct ring_buffer_per_cpu *cpu_buffer;
1431 unsigned long overruns = 0;
1432 int cpu;
1433
1434 /* if you care about this being correct, lock the buffer */
1435 for_each_buffer_cpu(buffer, cpu) {
1436 cpu_buffer = buffer->buffers[cpu];
1437 overruns += cpu_buffer->overrun;
1438 }
1439
1440 return overruns;
1441}
1442
1443/**
1444 * ring_buffer_iter_reset - reset an iterator
1445 * @iter: The iterator to reset
1446 *
1447 * Resets the iterator, so that it will start from the beginning
1448 * again.
1449 */
1450void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1451{
1452 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001453 unsigned long flags;
1454
1455 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001456
Steven Rostedtd7690412008-10-01 00:29:53 -04001457 /* Iterator usage is expected to have record disabled */
1458 if (list_empty(&cpu_buffer->reader_page->list)) {
1459 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001460 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001461 } else {
1462 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001463 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001464 }
1465 if (iter->head)
1466 iter->read_stamp = cpu_buffer->read_stamp;
1467 else
1468 iter->read_stamp = iter->head_page->time_stamp;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001469
1470 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001471}
1472
1473/**
1474 * ring_buffer_iter_empty - check if an iterator has no more to read
1475 * @iter: The iterator to check
1476 */
1477int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1478{
1479 struct ring_buffer_per_cpu *cpu_buffer;
1480
1481 cpu_buffer = iter->cpu_buffer;
1482
Steven Rostedtbf41a152008-10-04 02:00:59 -04001483 return iter->head_page == cpu_buffer->commit_page &&
1484 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001485}
1486
1487static void
1488rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1489 struct ring_buffer_event *event)
1490{
1491 u64 delta;
1492
1493 switch (event->type) {
1494 case RINGBUF_TYPE_PADDING:
1495 return;
1496
1497 case RINGBUF_TYPE_TIME_EXTEND:
1498 delta = event->array[0];
1499 delta <<= TS_SHIFT;
1500 delta += event->time_delta;
1501 cpu_buffer->read_stamp += delta;
1502 return;
1503
1504 case RINGBUF_TYPE_TIME_STAMP:
1505 /* FIXME: not implemented */
1506 return;
1507
1508 case RINGBUF_TYPE_DATA:
1509 cpu_buffer->read_stamp += event->time_delta;
1510 return;
1511
1512 default:
1513 BUG();
1514 }
1515 return;
1516}
1517
1518static void
1519rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1520 struct ring_buffer_event *event)
1521{
1522 u64 delta;
1523
1524 switch (event->type) {
1525 case RINGBUF_TYPE_PADDING:
1526 return;
1527
1528 case RINGBUF_TYPE_TIME_EXTEND:
1529 delta = event->array[0];
1530 delta <<= TS_SHIFT;
1531 delta += event->time_delta;
1532 iter->read_stamp += delta;
1533 return;
1534
1535 case RINGBUF_TYPE_TIME_STAMP:
1536 /* FIXME: not implemented */
1537 return;
1538
1539 case RINGBUF_TYPE_DATA:
1540 iter->read_stamp += event->time_delta;
1541 return;
1542
1543 default:
1544 BUG();
1545 }
1546 return;
1547}
1548
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *reader = NULL;
        unsigned long flags;
        int nr_loops = 0;

        local_irq_save(flags);
        __raw_spin_lock(&cpu_buffer->lock);

 again:
        /*
         * This should normally only loop twice. But because the
         * start of the reader inserts an empty page, it causes
         * a case where we will loop three times. There should be no
         * reason to loop four times (that I know of).
         */
        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
                reader = NULL;
                goto out;
        }

        reader = cpu_buffer->reader_page;

        /* If there's more to read, return this page */
        if (cpu_buffer->reader_page->read < rb_page_size(reader))
                goto out;

        /* Never should we have an index greater than the size */
        if (RB_WARN_ON(cpu_buffer,
                       cpu_buffer->reader_page->read > rb_page_size(reader)))
                goto out;

        /* check if we caught up to the tail */
        reader = NULL;
        if (cpu_buffer->commit_page == cpu_buffer->reader_page)
                goto out;

        /*
         * Splice the empty reader page into the list around the head.
         * Reset the reader page to size zero.
         */

        reader = cpu_buffer->head_page;
        cpu_buffer->reader_page->list.next = reader->list.next;
        cpu_buffer->reader_page->list.prev = reader->list.prev;

        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->commit, 0);

        /* Make the reader page now replace the head */
        reader->list.prev->next = &cpu_buffer->reader_page->list;
        reader->list.next->prev = &cpu_buffer->reader_page->list;

        /*
         * If the tail is on the reader, then we must set the head
         * to the inserted page, otherwise we set it one before.
         */
        cpu_buffer->head_page = cpu_buffer->reader_page;

        if (cpu_buffer->commit_page != reader)
                rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
        rb_reset_reader_page(cpu_buffer);

        goto again;

 out:
        __raw_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);

        return reader;
}

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        unsigned length;

        reader = rb_get_reader_page(cpu_buffer);

        /* This function should not be called when buffer is empty */
        if (RB_WARN_ON(cpu_buffer, !reader))
                return;

        event = rb_reader_event(cpu_buffer);

        if (event->type == RINGBUF_TYPE_DATA)
                cpu_buffer->entries--;

        rb_update_read_stamp(cpu_buffer, event);

        length = rb_event_length(event);
        cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;

        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

        /*
         * Check if we are at the end of the buffer.
         */
        if (iter->head >= rb_page_size(iter->head_page)) {
                if (RB_WARN_ON(buffer,
                               iter->head_page == cpu_buffer->commit_page))
                        return;
                rb_inc_iter(iter);
                return;
        }

        event = rb_iter_head_event(iter);

        length = rb_event_length(event);

        /*
         * This should not be called to advance the header if we are
         * at the tail of the buffer.
         */
        if (RB_WARN_ON(cpu_buffer,
                       (iter->head_page == cpu_buffer->commit_page) &&
                       (iter->head + length > rb_commit_index(cpu_buffer))))
                return;

        rb_update_iter_read_stamp(iter, event);

        iter->head += length;

        /* check for end of page padding */
        if ((iter->head >= rb_page_size(iter->head_page)) &&
            (iter->head_page != cpu_buffer->commit_page))
                rb_advance_iter(iter);
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        int nr_loops = 0;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

 again:
        /*
         * We repeat when a timestamp is encountered. It is possible
         * to get multiple timestamps from an interrupt entering just
         * as one timestamp is about to be written. The max times
         * that this can happen is the number of nested interrupts we
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;

        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
                return NULL;

        event = rb_reader_event(cpu_buffer);

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                RB_WARN_ON(cpu_buffer, 1);
                rb_advance_reader(cpu_buffer);
                return NULL;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_reader(cpu_buffer);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_reader(cpu_buffer);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = cpu_buffer->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        int nr_loops = 0;

        if (ring_buffer_iter_empty(iter))
                return NULL;

        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

 again:
        /*
         * We repeat when a timestamp is encountered. It is possible
         * to get multiple timestamps from an interrupt entering just
         * as one timestamp is about to be written. The max times
         * that this can happen is the number of nested interrupts we
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;

        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;

        event = rb_iter_head_event(iter);

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                rb_inc_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = iter->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_buffer_peek(buffer, cpu, ts);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}

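/*
 * Illustrative sketch (not part of this file): how a caller might use
 * ring_buffer_peek() to look at the next event on a CPU without
 * consuming it.  The "buffer" and "cpu" variables are hypothetical.
 *
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      event = ring_buffer_peek(buffer, cpu, &ts);
 *      if (event)
 *              pr_debug("next event: len=%u ts=%llu\n",
 *                       ring_buffer_event_length(event),
 *                       (unsigned long long)ts);
 */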
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
        struct ring_buffer_event *event;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_iter_peek(iter, ts);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The cpu to read the buffer from
 * @ts: The timestamp counter of this event.
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

        event = rb_buffer_peek(buffer, cpu, ts);
        if (!event)
                goto out;

        rb_advance_reader(cpu_buffer);

 out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}

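/*
 * Illustrative sketch (not part of this file): a minimal consuming read
 * loop.  Each call returns the next unread event on the given CPU and
 * advances past it, so repeated calls drain the buffer while the producer
 * side keeps writing.  The "buffer", "cpu" and process_event() names are
 * hypothetical.
 *
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
 *              void *data = ring_buffer_event_data(event);
 *
 *              process_event(data, ring_buffer_event_length(event), ts);
 *      }
 */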
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_iter *iter;
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

        iter->cpu_buffer = cpu_buffer;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        __raw_spin_lock(&cpu_buffer->lock);
        ring_buffer_iter_reset(iter);
        __raw_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return iter;
}

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

        atomic_dec(&cpu_buffer->record_disabled);
        kfree(iter);
}

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_event *event;
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_iter_peek(iter, ts);
        if (!event)
                goto out;

        rb_advance_iter(iter);
 out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}

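/*
 * Illustrative sketch (not part of this file): a non-consuming read of a
 * whole CPU buffer with the iterator API.  ring_buffer_read_start()
 * disables recording on that CPU buffer, ring_buffer_read() walks the
 * events, and ring_buffer_read_finish() re-enables recording and frees
 * the iterator.  The "buffer", "cpu" and show_event() names are
 * hypothetical.
 *
 *      struct ring_buffer_iter *iter;
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      iter = ring_buffer_read_start(buffer, cpu);
 *      if (!iter)
 *              return;
 *
 *      while ((event = ring_buffer_read(iter, &ts)))
 *              show_event(ring_buffer_event_data(event), ts);
 *
 *      ring_buffer_read_finish(iter);
 */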
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
        return BUF_PAGE_SIZE * buffer->pages;
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
        local_set(&cpu_buffer->head_page->write, 0);
        local_set(&cpu_buffer->head_page->commit, 0);

        cpu_buffer->head_page->read = 0;

        cpu_buffer->tail_page = cpu_buffer->head_page;
        cpu_buffer->commit_page = cpu_buffer->head_page;

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->commit, 0);
        cpu_buffer->reader_page->read = 0;

        cpu_buffer->overrun = 0;
        cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

        __raw_spin_lock(&cpu_buffer->lock);

        rb_reset_cpu(cpu_buffer);

        __raw_spin_unlock(&cpu_buffer->lock);

        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
        int cpu;

        for_each_buffer_cpu(buffer, cpu)
                ring_buffer_reset_cpu(buffer, cpu);
}

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;

        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                if (!rb_per_cpu_empty(cpu_buffer))
                        return 0;
        }
        return 1;
}

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;

        if (!cpu_isset(cpu, buffer->cpumask))
                return 1;

        cpu_buffer = buffer->buffers[cpu];
        return rb_per_cpu_empty(cpu_buffer);
}

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                         struct ring_buffer *buffer_b, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer_a;
        struct ring_buffer_per_cpu *cpu_buffer_b;

        if (!cpu_isset(cpu, buffer_a->cpumask) ||
            !cpu_isset(cpu, buffer_b->cpumask))
                return -EINVAL;

        /* At least make sure the two buffers are somewhat the same */
        if (buffer_a->size != buffer_b->size ||
            buffer_a->pages != buffer_b->pages)
                return -EINVAL;

        cpu_buffer_a = buffer_a->buffers[cpu];
        cpu_buffer_b = buffer_b->buffers[cpu];

        /*
         * We can't do a synchronize_sched here because this
         * function can be called in atomic context.
         * Normally this will be called from the same CPU as cpu.
         * If not it's up to the caller to protect this.
         */
        atomic_inc(&cpu_buffer_a->record_disabled);
        atomic_inc(&cpu_buffer_b->record_disabled);

        buffer_a->buffers[cpu] = cpu_buffer_b;
        buffer_b->buffers[cpu] = cpu_buffer_a;

        cpu_buffer_b->buffer = buffer_a;
        cpu_buffer_a->buffer = buffer_b;

        atomic_dec(&cpu_buffer_a->record_disabled);
        atomic_dec(&cpu_buffer_b->record_disabled);

        return 0;
}

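/*
 * Illustrative sketch (not part of this file): taking a "snapshot" of one
 * CPU buffer by swapping it with a spare buffer of the same size, then
 * reading the snapshot at leisure while new events go into the spare.
 * The "trace_buf", "snapshot_buf", "cpu" and handle_snapshot_event()
 * names are hypothetical; both ring buffers are assumed to have been
 * allocated with the same size.
 *
 *      if (ring_buffer_swap_cpu(trace_buf, snapshot_buf, cpu) == 0) {
 *              struct ring_buffer_event *event;
 *              u64 ts;
 *
 *              while ((event = ring_buffer_consume(snapshot_buf, cpu, &ts)))
 *                      handle_snapshot_event(event, ts);
 *      }
 */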