/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/* Global flag to disable all recording to ring buffers */
static int ring_buffers_off __read_mostly;

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	ring_buffers_off = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	ring_buffers_off = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

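/*
 * For example, building with DEBUG_SHIFT set to 3 (a hypothetical
 * test setting, not one this file ships with) multiplies every raw
 * sched_clock() value by 8, inflating the deltas between events so
 * the RINGBUF_TYPE_TIME_EXTEND path gets exercised;
 * ring_buffer_normalize_time_stamp() undoes the shift on output.
 */
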
/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = sched_clock() << DEBUG_SHIFT;
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type != RINGBUF_TYPE_DATA)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu_mask(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/*
 * This hack stolen from mm/slob.c.
 * We can store per page timing information in the page frame of the page.
 * Thanks to Peter Zijlstra for suggesting this idea.
 */
struct buffer_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 write;		/* index for next write */
	local_t		 commit;	/* write committed index */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	void *page;			/* Actual data page */
};

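/*
 * A note on the three indexes above: a writer first advances 'write'
 * to claim space (writers on the same CPU can nest via interrupts),
 * and only the outermost writer walks 'commit' forward behind it;
 * readers trust nothing past 'commit'. 'read' tracks how far a reader
 * has consumed this page. See rb_set_commit_to_write() below.
 */
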
/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

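/*
 * Assuming sched_clock() counts nanoseconds, 27 bits cover a delta of
 * about 134 ms (1 << 27 ns); any longer gap between events has to be
 * recorded as a RINGBUF_TYPE_TIME_EXTEND event instead (see
 * rb_add_time_stamp()).
 */
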
#define BUF_PAGE_SIZE PAGE_SIZE

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned long			size;
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

#define RB_WARN_ON(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

#define RB_WARN_ON_RET(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
			return -1;				\
		}						\
	} while (0)

#define RB_WARN_ON_ONCE(buffer, cond)				\
	do {							\
		static int once;				\
		if (unlikely(cond) && !once) {			\
			once++;					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);

	list_for_each_entry_safe(page, tmp, head, list) {
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.next->prev != &page->list);
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.prev->next != &page->list);
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!page)
			goto free_pages;
		list_add(&page->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		page->page = (void *)addr;
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *page;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->lock);
	INIT_LIST_HEAD(&cpu_buffer->pages);

	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		goto fail_free_buffer;

	cpu_buffer->reader_page = page;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	page->page = (void *)addr;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(page, tmp, head, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	buffer->cpumask = cpu_possible_map;
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_buffer;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);

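/*
 * A minimal usage sketch (error handling trimmed; "payload" stands in
 * for any caller-supplied object and is not part of this API):
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	if (ring_buffer_write(buffer, sizeof(payload), &payload))
 *		;	(non-zero means recording was off; event dropped)
 *	ring_buffer_free(buffer);
 */
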
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(&cpu_buffer->pages));
		p = cpu_buffer->pages.next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	BUG_ON(list_empty(&cpu_buffer->pages));

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(pages));
		p = pages->next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		list_add_tail(&page->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns the new size on success and -ENOMEM on allocation failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *page, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		BUG_ON(nr_pages >= buffer->pages);

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	BUG_ON(nr_pages <= buffer->pages);
	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			page = kzalloc_node(ALIGN(sizeof(*page),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!page)
				goto free_pages;
			list_add(&page->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			page->page = (void *)addr;
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	BUG_ON(!list_empty(&pages));

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
{
	return page->page + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		BUG_ON(rb_null_event(event));
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **page)
{
	struct list_head *p = (*page)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*page = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static inline void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		RB_WARN_ON(cpu_buffer,
			   cpu_buffer->commit_page == cpu_buffer->tail_page);
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->commit, index);
}

static inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}

static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}

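/*
 * A worked example of the sizing above (assuming the 4-byte bitfield
 * event header declared in <linux/ring_buffer.h>): a request for 10
 * bytes of data becomes 10 + 4 = 14, aligned up to 16 bytes on the
 * page. rb_update_event() then stores (16 - 4) >> RB_ALIGNMENT_SHIFT
 * = 3 in event->len, and rb_event_length() recovers (3 << 2) + 4 = 16.
 * Payloads larger than RB_MAX_SMALL_DATA set len to 0 and keep the
 * true length in array[0], which is why ring_buffer_event_length()
 * subtracts sizeof(event->array[0]) for big events.
 */
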
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		spin_lock_irqsave(&cpu_buffer->lock, flags);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		RB_WARN_ON(cpu_buffer, next_page == reader_page);

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == cpu_buffer->commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		spin_unlock_irqrestore(&cpu_buffer->lock, flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	BUG_ON(write > BUF_PAGE_SIZE);

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->time_stamp = *ts;

	return event;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}

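/*
 * A committed time extend therefore carries the full delta split in
 * two pieces, reassembled on the read side as
 *
 *	delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 *
 * which is exactly what rb_update_read_stamp() and
 * rb_update_iter_read_stamp() do below.
 */
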
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (unlikely(++nr_loops > 1000)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed. But
			 * we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffers_off)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);

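/*
 * A reserve/commit round trip, sketched (my_record is a hypothetical
 * caller-defined struct, not part of this API):
 *
 *	struct ring_buffer_event *event;
 *	struct my_record *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */
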
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1) {
		if (per_cpu(rb_need_resched, cpu))
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	} else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffers_off)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);

static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->overrun;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += cpu_buffer->entries;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->time_stamp;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

1559static void
1560rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1561 struct ring_buffer_event *event)
1562{
1563 u64 delta;
1564
1565 switch (event->type) {
1566 case RINGBUF_TYPE_PADDING:
1567 return;
1568
1569 case RINGBUF_TYPE_TIME_EXTEND:
1570 delta = event->array[0];
1571 delta <<= TS_SHIFT;
1572 delta += event->time_delta;
1573 cpu_buffer->read_stamp += delta;
1574 return;
1575
1576 case RINGBUF_TYPE_TIME_STAMP:
1577 /* FIXME: not implemented */
1578 return;
1579
1580 case RINGBUF_TYPE_DATA:
1581 cpu_buffer->read_stamp += event->time_delta;
1582 return;
1583
1584 default:
1585 BUG();
1586 }
1587 return;
1588}
1589
static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

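/*
 * Worked example for the two helpers above, assuming TS_SHIFT == 27
 * (the split this file uses between a delta's low bits and its
 * overflow): a delta of 2^28 ns does not fit in the event's
 * time_delta field, so the writer emits a TIME_EXTEND event with
 * array[0] = 2 and time_delta = 0. Reading it back computes
 * (2 << 27) + 0 == 2^28, recovering the full delta.
 */
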
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	spin_lock_irqsave(&cpu_buffer->lock, flags);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (unlikely(++nr_loops > 3)) {
		RB_WARN_ON(cpu_buffer, 1);
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	RB_WARN_ON(cpu_buffer,
		   cpu_buffer->reader_page->read > rb_page_size(reader));

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);

	return reader;
}

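/*
 * Descriptive sketch of the page swap in rb_get_reader_page() above
 * (a simplified picture, not an exact data-structure dump):
 *
 *	before:  ...<->[prev]<->[head]<->[next]<->...    [reader] outside
 *	after:   ...<->[prev]<->[reader]<->[next]<->...  [head] outside
 *
 * The empty reader page is spliced into the ring where the head page
 * was, and the old head page is handed to the reader. Writers keep
 * cycling through the ring, so they never touch the page the reader
 * now owns.
 */
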
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	BUG_ON(!reader);

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		BUG_ON(iter->head_page == cpu_buffer->commit_page);
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
	       (iter->head + length > rb_commit_index(cpu_buffer)));

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		RB_WARN_ON(cpu_buffer, 1);
		rb_advance_reader(cpu_buffer);
		return NULL;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
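
/*
 * Minimal usage sketch for ring_buffer_peek(), guarded out of the
 * build since it is illustration only; the buffer pointer and cpu
 * are assumptions of the example. Peeking returns the next event
 * without consuming it, so a subsequent consume sees the same event.
 */
#if 0
static void example_peek(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	event = ring_buffer_peek(buffer, cpu, &ts);
	if (event)
		pr_info("next event: %u bytes at %llu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts);
}
#endif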

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The cpu buffer to consume from
 * @ts: Where to store the event's timestamp (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	event = ring_buffer_peek(buffer, cpu, ts);
	if (!event)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];
	rb_advance_reader(cpu_buffer);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
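
/*
 * Minimal usage sketch for ring_buffer_consume(), guarded out of the
 * build since it is illustration only; the names are assumptions of
 * the example. Each call returns the next event and advances past
 * it, so the loop ends once the reader has caught up with the writer.
 */
#if 0
static void example_drain(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);

		/* process 'data' before the writer recycles its page */
		(void)data;
	}
}
#endif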

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->lock, flags);
	ring_buffer_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;

	event = ring_buffer_iter_peek(iter, ts);
	if (!event)
		return NULL;

	rb_advance_iter(iter);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
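
/*
 * Usage sketch for the non-consuming iterator API above, guarded out
 * of the build since it is illustration only. read_start disables
 * recording on the cpu buffer, ring_buffer_read() walks every event
 * without consuming it, and read_finish re-enables recording and
 * frees the iterator.
 */
#if 0
static void example_iterate(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		; /* inspect the event; the buffer itself is untouched */

	ring_buffer_read_finish(iter);
}
#endif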

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	spin_lock_irqsave(&cpu_buffer->lock, flags);

	rb_reset_cpu(cpu_buffer);

	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpu_isset(cpu, buffer_a->cpumask) ||
	    !cpu_isset(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->size != buffer_b->size ||
	    buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);

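/*
 * Usage sketch for ring_buffer_swap_cpu() above, guarded out of the
 * build since it is illustration only; the "spare" snapshot buffer is
 * an assumption of the example. A tracer keeps a second buffer of the
 * same size around, swaps it in to freeze the live data for one cpu,
 * then reads the frozen copy without stalling the writer.
 */
#if 0
static void example_snapshot(struct ring_buffer *live,
			     struct ring_buffer *spare, int cpu)
{
	struct ring_buffer_event *event;

	if (ring_buffer_swap_cpu(live, spare, cpu))
		return;

	/* 'spare' now holds what was recorded on 'live' for this cpu */
	while ((event = ring_buffer_consume(spare, cpu, NULL)))
		;
}
#endif
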
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	int *p = filp->private_data;
	char buf[64];
	int r;

	/* !ring_buffers_off == tracing_on */
	r = sprintf(buf, "%d\n", !*p);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	int *p = filp->private_data;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* !ring_buffers_off == tracing_on */
	*p = !val;

	(*ppos)++;

	return cnt;
}

static struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};


static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
				    &ring_buffers_off, &rb_simple_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_on' entry\n");

	return 0;
}

fs_initcall(rb_init_debugfs);
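
/*
 * With debugfs mounted at its conventional location (an assumption,
 * not something this file controls), the file created above can be
 * driven from the shell:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# stop recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# resume recording
 */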