Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
Steven Rostedt182e9f52008-11-03 23:15:56 -050019#include "trace.h"
20
Steven Rostedt033601a2008-11-21 12:41:55 -050021/*
22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
 25 * Turning this switch on makes it OK to write to the
26 * ring buffer, if the ring buffer is enabled itself.
27 *
 28 * There are three layers that must be on in order to write
29 * to the ring buffer.
30 *
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
34 *
35 * In case of an anomaly, this global flag has a bit set that
 36 * will permanently disable all ring buffers.
37 */
38
39/*
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
42 *
43 * ON DISABLED
44 * ---- ----------
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
48 */
49
50enum {
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
53};
54
55enum {
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
58};
59
60static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
Steven Rostedta3583242008-11-11 15:01:42 -050061
62/**
63 * tracing_on - enable all tracing buffers
64 *
65 * This function enables all tracing buffers that may have been
66 * disabled with tracing_off.
67 */
68void tracing_on(void)
69{
Steven Rostedt033601a2008-11-21 12:41:55 -050070 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
Steven Rostedta3583242008-11-11 15:01:42 -050071}
72
73/**
74 * tracing_off - turn off all tracing buffers
75 *
76 * This function stops all tracing buffers from recording data.
77 * It does not disable any overhead the tracers themselves may
78 * be causing. This function simply causes all recording to
79 * the ring buffers to fail.
80 */
81void tracing_off(void)
82{
Steven Rostedt033601a2008-11-21 12:41:55 -050083 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
84}
85
86/**
87 * tracing_off_permanent - permanently disable ring buffers
88 *
89 * This function, once called, will disable all ring buffers
 90 * permanently.
91 */
92void tracing_off_permanent(void)
93{
94 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
Steven Rostedta3583242008-11-11 15:01:42 -050095}
96
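/*
 * Note: these helpers are the global layer described above.  Both
 * ring_buffer_lock_reserve() and ring_buffer_write() check
 * ring_buffer_flags != RB_BUFFERS_ON before touching any buffer, so
 * tracing_off() makes every write attempt fail, and
 * tracing_off_permanent() sets a bit that is never cleared again.
 */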
98
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040099/* Up this if you want to test the TIME_EXTENTS and normalization */
100#define DEBUG_SHIFT 0
101
102/* FIXME!!! */
103u64 ring_buffer_time_stamp(int cpu)
104{
Steven Rostedt47e74f22008-11-12 00:01:27 -0500105 u64 time;
106
107 preempt_disable_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400108 /* shift to debug/test normalization and TIME_EXTENTS */
Steven Rostedt47e74f22008-11-12 00:01:27 -0500109 time = sched_clock() << DEBUG_SHIFT;
Frederic Weisbecker2c2d7322008-12-16 22:08:58 +0100110 preempt_enable_no_resched_notrace();
Steven Rostedt47e74f22008-11-12 00:01:27 -0500111
112 return time;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400113}
114
115void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
116{
117 /* Just stupid testing the normalize function and deltas */
118 *ts >>= DEBUG_SHIFT;
119}
120
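/*
 * Event encoding: every event starts with a small header
 * (RB_EVNT_HDR_SIZE) followed by RB_ALIGNMENT-aligned data.  A data
 * event whose payload fits in RB_MAX_SMALL_DATA encodes its size in the
 * len field (len << RB_ALIGNMENT_SHIFT bytes); larger events set len to
 * zero and store the byte count in array[0] instead.  See
 * rb_event_length() and rb_update_event() below.
 */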
121#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
122#define RB_ALIGNMENT_SHIFT 2
123#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
124#define RB_MAX_SMALL_DATA 28
125
126enum {
127 RB_LEN_TIME_EXTEND = 8,
128 RB_LEN_TIME_STAMP = 16,
129};
130
131/* inline for ring buffer fast paths */
132static inline unsigned
133rb_event_length(struct ring_buffer_event *event)
134{
135 unsigned length;
136
137 switch (event->type) {
138 case RINGBUF_TYPE_PADDING:
139 /* undefined */
140 return -1;
141
142 case RINGBUF_TYPE_TIME_EXTEND:
143 return RB_LEN_TIME_EXTEND;
144
145 case RINGBUF_TYPE_TIME_STAMP:
146 return RB_LEN_TIME_STAMP;
147
148 case RINGBUF_TYPE_DATA:
149 if (event->len)
150 length = event->len << RB_ALIGNMENT_SHIFT;
151 else
152 length = event->array[0];
153 return length + RB_EVNT_HDR_SIZE;
154 default:
155 BUG();
156 }
157 /* not hit */
158 return 0;
159}
160
161/**
162 * ring_buffer_event_length - return the length of the event
163 * @event: the event to get the length of
164 */
165unsigned ring_buffer_event_length(struct ring_buffer_event *event)
166{
167 return rb_event_length(event);
168}
169
170/* inline for ring buffer fast paths */
171static inline void *
172rb_event_data(struct ring_buffer_event *event)
173{
174 BUG_ON(event->type != RINGBUF_TYPE_DATA);
175 /* If length is in len field, then array[0] has the data */
176 if (event->len)
177 return (void *)&event->array[0];
178 /* Otherwise length is in array[0] and array[1] has the data */
179 return (void *)&event->array[1];
180}
181
182/**
183 * ring_buffer_event_data - return the data of the event
184 * @event: the event to get the data from
185 */
186void *ring_buffer_event_data(struct ring_buffer_event *event)
187{
188 return rb_event_data(event);
189}
190
191#define for_each_buffer_cpu(buffer, cpu) \
192 for_each_cpu_mask(cpu, buffer->cpumask)
193
194#define TS_SHIFT 27
195#define TS_MASK ((1ULL << TS_SHIFT) - 1)
196#define TS_DELTA_TEST (~TS_MASK)
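/*
 * Timestamps are stored as deltas from the previous event and must fit
 * in TS_SHIFT (27) bits.  When a delta is too large (test_time_stamp()
 * returns true), a TIME_EXTEND event is emitted instead, carrying the
 * low bits in time_delta and the high bits in array[0]; see
 * rb_add_time_stamp().
 */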
197
Steven Rostedtabc9b562008-12-02 15:34:06 -0500198struct buffer_data_page {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400199 u64 time_stamp; /* page time stamp */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400200 local_t commit; /* write committed index */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500201 unsigned char data[]; /* data of buffer page */
202};
203
204struct buffer_page {
205 local_t write; /* index for next write */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400206 unsigned read; /* index for next read */
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400207 struct list_head list; /* list of free pages */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500208 struct buffer_data_page *page; /* Actual data page */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400209};
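/*
 * The indexes above track an event's life cycle: 'write' is where the
 * next reservation goes, 'commit' (kept in the data page) marks how far
 * the data is valid for readers, and 'read' is how far the reader has
 * consumed the reader page.
 */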
210
Steven Rostedt044fa782008-12-02 23:50:03 -0500211static void rb_init_page(struct buffer_data_page *bpage)
Steven Rostedtabc9b562008-12-02 15:34:06 -0500212{
Steven Rostedt044fa782008-12-02 23:50:03 -0500213 local_set(&bpage->commit, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -0500214}
215
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400216/*
Steven Rostedted568292008-09-29 23:02:40 -0400217 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
218 * this issue out.
219 */
220static inline void free_buffer_page(struct buffer_page *bpage)
221{
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400222 if (bpage->page)
Steven Rostedt6ae2a072008-10-13 10:22:06 -0400223 free_page((unsigned long)bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400224 kfree(bpage);
Steven Rostedted568292008-09-29 23:02:40 -0400225}
226
227/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400228 * We need to fit the time_stamp delta into 27 bits.
229 */
230static inline int test_time_stamp(u64 delta)
231{
232 if (delta & TS_DELTA_TEST)
233 return 1;
234 return 0;
235}
236
Steven Rostedtabc9b562008-12-02 15:34:06 -0500237#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
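/*
 * The usable area of a page is what remains after the buffer_data_page
 * header (time_stamp + commit).  Events never span pages;
 * __rb_reserve_next() moves on to the next page whenever a write would
 * run past BUF_PAGE_SIZE.
 */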
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400238
239/*
240 * head_page == tail_page && head == tail then buffer is empty.
241 */
242struct ring_buffer_per_cpu {
243 int cpu;
244 struct ring_buffer *buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100245 spinlock_t reader_lock; /* serialize readers */
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500246 raw_spinlock_t lock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400247 struct lock_class_key lock_key;
248 struct list_head pages;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400249 struct buffer_page *head_page; /* read from head */
250 struct buffer_page *tail_page; /* write to tail */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400251 struct buffer_page *commit_page; /* committed pages */
Steven Rostedtd7690412008-10-01 00:29:53 -0400252 struct buffer_page *reader_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400253 unsigned long overrun;
254 unsigned long entries;
255 u64 write_stamp;
256 u64 read_stamp;
257 atomic_t record_disabled;
258};
259
260struct ring_buffer {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400261 unsigned pages;
262 unsigned flags;
263 int cpus;
264 cpumask_t cpumask;
265 atomic_t record_disabled;
266
267 struct mutex mutex;
268
269 struct ring_buffer_per_cpu **buffers;
270};
271
272struct ring_buffer_iter {
273 struct ring_buffer_per_cpu *cpu_buffer;
274 unsigned long head;
275 struct buffer_page *head_page;
276 u64 read_stamp;
277};
278
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500279/* buffer may be either ring_buffer or ring_buffer_per_cpu */
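/*
 * On a failed check, record_disabled is bumped so nothing more is
 * written into a possibly corrupted buffer, and the value of the
 * condition is returned so the caller can bail out of the current
 * operation.
 */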
Steven Rostedtbf41a152008-10-04 02:00:59 -0400280#define RB_WARN_ON(buffer, cond) \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500281 ({ \
282 int _____ret = unlikely(cond); \
283 if (_____ret) { \
Steven Rostedtbf41a152008-10-04 02:00:59 -0400284 atomic_inc(&buffer->record_disabled); \
285 WARN_ON(1); \
286 } \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500287 _____ret; \
288 })
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500289
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400290/**
 291 * rb_check_pages - integrity check of buffer pages
292 * @cpu_buffer: CPU buffer with pages to test
293 *
 294 * As a safety measure we check to make sure the data pages have not
295 * been corrupted.
296 */
297static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
298{
299 struct list_head *head = &cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500300 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400301
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500302 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
303 return -1;
304 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
305 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400306
Steven Rostedt044fa782008-12-02 23:50:03 -0500307 list_for_each_entry_safe(bpage, tmp, head, list) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500308 if (RB_WARN_ON(cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500309 bpage->list.next->prev != &bpage->list))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500310 return -1;
311 if (RB_WARN_ON(cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500312 bpage->list.prev->next != &bpage->list))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500313 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400314 }
315
316 return 0;
317}
318
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400319static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
320 unsigned nr_pages)
321{
322 struct list_head *head = &cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500323 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400324 unsigned long addr;
325 LIST_HEAD(pages);
326 unsigned i;
327
328 for (i = 0; i < nr_pages; i++) {
Steven Rostedt044fa782008-12-02 23:50:03 -0500329 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
Steven Rostedtaa1e0e32008-10-02 19:18:09 -0400330 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500331 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400332 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500333 list_add(&bpage->list, &pages);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400334
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400335 addr = __get_free_page(GFP_KERNEL);
336 if (!addr)
337 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500338 bpage->page = (void *)addr;
339 rb_init_page(bpage->page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400340 }
341
342 list_splice(&pages, head);
343
344 rb_check_pages(cpu_buffer);
345
346 return 0;
347
348 free_pages:
Steven Rostedt044fa782008-12-02 23:50:03 -0500349 list_for_each_entry_safe(bpage, tmp, &pages, list) {
350 list_del_init(&bpage->list);
351 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400352 }
353 return -ENOMEM;
354}
355
356static struct ring_buffer_per_cpu *
357rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
358{
359 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt044fa782008-12-02 23:50:03 -0500360 struct buffer_page *bpage;
Steven Rostedtd7690412008-10-01 00:29:53 -0400361 unsigned long addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400362 int ret;
363
364 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
365 GFP_KERNEL, cpu_to_node(cpu));
366 if (!cpu_buffer)
367 return NULL;
368
369 cpu_buffer->cpu = cpu;
370 cpu_buffer->buffer = buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100371 spin_lock_init(&cpu_buffer->reader_lock);
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500372 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400373 INIT_LIST_HEAD(&cpu_buffer->pages);
374
Steven Rostedt044fa782008-12-02 23:50:03 -0500375 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400376 GFP_KERNEL, cpu_to_node(cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500377 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400378 goto fail_free_buffer;
379
Steven Rostedt044fa782008-12-02 23:50:03 -0500380 cpu_buffer->reader_page = bpage;
Steven Rostedtd7690412008-10-01 00:29:53 -0400381 addr = __get_free_page(GFP_KERNEL);
382 if (!addr)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400383 goto fail_free_reader;
Steven Rostedt044fa782008-12-02 23:50:03 -0500384 bpage->page = (void *)addr;
385 rb_init_page(bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400386
Steven Rostedtd7690412008-10-01 00:29:53 -0400387 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
Steven Rostedtd7690412008-10-01 00:29:53 -0400388
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400389 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
390 if (ret < 0)
Steven Rostedtd7690412008-10-01 00:29:53 -0400391 goto fail_free_reader;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400392
393 cpu_buffer->head_page
394 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400395 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400396
397 return cpu_buffer;
398
Steven Rostedtd7690412008-10-01 00:29:53 -0400399 fail_free_reader:
400 free_buffer_page(cpu_buffer->reader_page);
401
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400402 fail_free_buffer:
403 kfree(cpu_buffer);
404 return NULL;
405}
406
407static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
408{
409 struct list_head *head = &cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500410 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400411
Steven Rostedtd7690412008-10-01 00:29:53 -0400412 list_del_init(&cpu_buffer->reader_page->list);
413 free_buffer_page(cpu_buffer->reader_page);
414
Steven Rostedt044fa782008-12-02 23:50:03 -0500415 list_for_each_entry_safe(bpage, tmp, head, list) {
416 list_del_init(&bpage->list);
417 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400418 }
419 kfree(cpu_buffer);
420}
421
Steven Rostedta7b13742008-09-29 23:02:39 -0400422/*
423 * Causes compile errors if the struct buffer_page gets bigger
424 * than the struct page.
425 */
426extern int ring_buffer_page_too_big(void);
427
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400428/**
429 * ring_buffer_alloc - allocate a new ring_buffer
430 * @size: the size in bytes that is needed.
431 * @flags: attributes to set for the ring buffer.
432 *
433 * Currently the only flag that is available is the RB_FL_OVERWRITE
434 * flag. This flag means that the buffer will overwrite old data
435 * when the buffer wraps. If this flag is not set, the buffer will
436 * drop data when the tail hits the head.
437 */
438struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
439{
440 struct ring_buffer *buffer;
441 int bsize;
442 int cpu;
443
Steven Rostedta7b13742008-09-29 23:02:39 -0400444 /* Paranoid! Optimizes out when all is well */
445 if (sizeof(struct buffer_page) > sizeof(struct page))
446 ring_buffer_page_too_big();
447
448
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400449 /* keep it in its own cache line */
450 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
451 GFP_KERNEL);
452 if (!buffer)
453 return NULL;
454
455 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
456 buffer->flags = flags;
457
458 /* need at least two pages */
459 if (buffer->pages == 1)
460 buffer->pages++;
461
462 buffer->cpumask = cpu_possible_map;
463 buffer->cpus = nr_cpu_ids;
464
465 bsize = sizeof(void *) * nr_cpu_ids;
466 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
467 GFP_KERNEL);
468 if (!buffer->buffers)
469 goto fail_free_buffer;
470
471 for_each_buffer_cpu(buffer, cpu) {
472 buffer->buffers[cpu] =
473 rb_allocate_cpu_buffer(buffer, cpu);
474 if (!buffer->buffers[cpu])
475 goto fail_free_buffers;
476 }
477
478 mutex_init(&buffer->mutex);
479
480 return buffer;
481
482 fail_free_buffers:
483 for_each_buffer_cpu(buffer, cpu) {
484 if (buffer->buffers[cpu])
485 rb_free_cpu_buffer(buffer->buffers[cpu]);
486 }
487 kfree(buffer->buffers);
488
489 fail_free_buffer:
490 kfree(buffer);
491 return NULL;
492}
493
494/**
495 * ring_buffer_free - free a ring buffer.
496 * @buffer: the buffer to free.
497 */
498void
499ring_buffer_free(struct ring_buffer *buffer)
500{
501 int cpu;
502
503 for_each_buffer_cpu(buffer, cpu)
504 rb_free_cpu_buffer(buffer->buffers[cpu]);
505
506 kfree(buffer);
507}
508
509static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
510
511static void
512rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
513{
Steven Rostedt044fa782008-12-02 23:50:03 -0500514 struct buffer_page *bpage;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400515 struct list_head *p;
516 unsigned i;
517
518 atomic_inc(&cpu_buffer->record_disabled);
519 synchronize_sched();
520
521 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500522 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
523 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400524 p = cpu_buffer->pages.next;
Steven Rostedt044fa782008-12-02 23:50:03 -0500525 bpage = list_entry(p, struct buffer_page, list);
526 list_del_init(&bpage->list);
527 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400528 }
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500529 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
530 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400531
532 rb_reset_cpu(cpu_buffer);
533
534 rb_check_pages(cpu_buffer);
535
536 atomic_dec(&cpu_buffer->record_disabled);
537
538}
539
540static void
541rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
542 struct list_head *pages, unsigned nr_pages)
543{
Steven Rostedt044fa782008-12-02 23:50:03 -0500544 struct buffer_page *bpage;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400545 struct list_head *p;
546 unsigned i;
547
548 atomic_inc(&cpu_buffer->record_disabled);
549 synchronize_sched();
550
551 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500552 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
553 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400554 p = pages->next;
Steven Rostedt044fa782008-12-02 23:50:03 -0500555 bpage = list_entry(p, struct buffer_page, list);
556 list_del_init(&bpage->list);
557 list_add_tail(&bpage->list, &cpu_buffer->pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400558 }
559 rb_reset_cpu(cpu_buffer);
560
561 rb_check_pages(cpu_buffer);
562
563 atomic_dec(&cpu_buffer->record_disabled);
564}
565
566/**
567 * ring_buffer_resize - resize the ring buffer
568 * @buffer: the buffer to resize.
569 * @size: the new size.
570 *
571 * The tracer is responsible for making sure that the buffer is
572 * not being used while changing the size.
573 * Note: We may be able to change the above requirement by using
574 * RCU synchronizations.
575 *
576 * Minimum size is 2 * BUF_PAGE_SIZE.
577 *
578 * Returns -1 on failure.
579 */
580int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
581{
582 struct ring_buffer_per_cpu *cpu_buffer;
583 unsigned nr_pages, rm_pages, new_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500584 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400585 unsigned long buffer_size;
586 unsigned long addr;
587 LIST_HEAD(pages);
588 int i, cpu;
589
Ingo Molnaree51a1d2008-11-13 14:58:31 +0100590 /*
591 * Always succeed at resizing a non-existent buffer:
592 */
593 if (!buffer)
594 return size;
595
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400596 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
597 size *= BUF_PAGE_SIZE;
598 buffer_size = buffer->pages * BUF_PAGE_SIZE;
599
600 /* we need a minimum of two pages */
601 if (size < BUF_PAGE_SIZE * 2)
602 size = BUF_PAGE_SIZE * 2;
603
604 if (size == buffer_size)
605 return size;
606
607 mutex_lock(&buffer->mutex);
608
609 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
610
611 if (size < buffer_size) {
612
613 /* easy case, just free pages */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500614 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
615 mutex_unlock(&buffer->mutex);
616 return -1;
617 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400618
619 rm_pages = buffer->pages - nr_pages;
620
621 for_each_buffer_cpu(buffer, cpu) {
622 cpu_buffer = buffer->buffers[cpu];
623 rb_remove_pages(cpu_buffer, rm_pages);
624 }
625 goto out;
626 }
627
628 /*
629 * This is a bit more difficult. We only want to add pages
630 * when we can allocate enough for all CPUs. We do this
631 * by allocating all the pages and storing them on a local
632 * link list. If we succeed in our allocation, then we
633 * add these pages to the cpu_buffers. Otherwise we just free
 634 * them all and return -ENOMEM.
635 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500636 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
637 mutex_unlock(&buffer->mutex);
638 return -1;
639 }
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500640
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400641 new_pages = nr_pages - buffer->pages;
642
643 for_each_buffer_cpu(buffer, cpu) {
644 for (i = 0; i < new_pages; i++) {
Steven Rostedt044fa782008-12-02 23:50:03 -0500645 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400646 cache_line_size()),
647 GFP_KERNEL, cpu_to_node(cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500648 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400649 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500650 list_add(&bpage->list, &pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400651 addr = __get_free_page(GFP_KERNEL);
652 if (!addr)
653 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500654 bpage->page = (void *)addr;
655 rb_init_page(bpage->page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400656 }
657 }
658
659 for_each_buffer_cpu(buffer, cpu) {
660 cpu_buffer = buffer->buffers[cpu];
661 rb_insert_pages(cpu_buffer, &pages, new_pages);
662 }
663
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500664 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
665 mutex_unlock(&buffer->mutex);
666 return -1;
667 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400668
669 out:
670 buffer->pages = nr_pages;
671 mutex_unlock(&buffer->mutex);
672
673 return size;
674
675 free_pages:
Steven Rostedt044fa782008-12-02 23:50:03 -0500676 list_for_each_entry_safe(bpage, tmp, &pages, list) {
677 list_del_init(&bpage->list);
678 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400679 }
Vegard Nossum641d2f62008-11-18 19:22:13 +0100680 mutex_unlock(&buffer->mutex);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400681 return -ENOMEM;
682}
683
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400684static inline int rb_null_event(struct ring_buffer_event *event)
685{
686 return event->type == RINGBUF_TYPE_PADDING;
687}
688
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500689static inline void *
Steven Rostedt044fa782008-12-02 23:50:03 -0500690__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500691{
Steven Rostedt044fa782008-12-02 23:50:03 -0500692 return bpage->data + index;
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500693}
694
Steven Rostedt044fa782008-12-02 23:50:03 -0500695static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400696{
Steven Rostedt044fa782008-12-02 23:50:03 -0500697 return bpage->page->data + index;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400698}
699
700static inline struct ring_buffer_event *
Steven Rostedtd7690412008-10-01 00:29:53 -0400701rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400702{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400703 return __rb_page_index(cpu_buffer->reader_page,
704 cpu_buffer->reader_page->read);
705}
706
707static inline struct ring_buffer_event *
708rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
709{
710 return __rb_page_index(cpu_buffer->head_page,
711 cpu_buffer->head_page->read);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400712}
713
714static inline struct ring_buffer_event *
715rb_iter_head_event(struct ring_buffer_iter *iter)
716{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400717 return __rb_page_index(iter->head_page, iter->head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400718}
719
Steven Rostedtbf41a152008-10-04 02:00:59 -0400720static inline unsigned rb_page_write(struct buffer_page *bpage)
721{
722 return local_read(&bpage->write);
723}
724
725static inline unsigned rb_page_commit(struct buffer_page *bpage)
726{
Steven Rostedtabc9b562008-12-02 15:34:06 -0500727 return local_read(&bpage->page->commit);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400728}
729
 730/* Size is determined by what has been committed */
731static inline unsigned rb_page_size(struct buffer_page *bpage)
732{
733 return rb_page_commit(bpage);
734}
735
736static inline unsigned
737rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
738{
739 return rb_page_commit(cpu_buffer->commit_page);
740}
741
742static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
743{
744 return rb_page_commit(cpu_buffer->head_page);
745}
746
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400747/*
748 * When the tail hits the head and the buffer is in overwrite mode,
749 * the head jumps to the next page and all content on the previous
750 * page is discarded. But before doing so, we update the overrun
751 * variable of the buffer.
752 */
753static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
754{
755 struct ring_buffer_event *event;
756 unsigned long head;
757
758 for (head = 0; head < rb_head_size(cpu_buffer);
759 head += rb_event_length(event)) {
760
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400761 event = __rb_page_index(cpu_buffer->head_page, head);
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500762 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
763 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400764 /* Only count data entries */
765 if (event->type != RINGBUF_TYPE_DATA)
766 continue;
767 cpu_buffer->overrun++;
768 cpu_buffer->entries--;
769 }
770}
771
772static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500773 struct buffer_page **bpage)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400774{
Steven Rostedt044fa782008-12-02 23:50:03 -0500775 struct list_head *p = (*bpage)->list.next;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400776
777 if (p == &cpu_buffer->pages)
778 p = p->next;
779
Steven Rostedt044fa782008-12-02 23:50:03 -0500780 *bpage = list_entry(p, struct buffer_page, list);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400781}
782
Steven Rostedtbf41a152008-10-04 02:00:59 -0400783static inline unsigned
784rb_event_index(struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400785{
Steven Rostedtbf41a152008-10-04 02:00:59 -0400786 unsigned long addr = (unsigned long)event;
787
788 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400789}
790
Steven Rostedtbf41a152008-10-04 02:00:59 -0400791static inline int
792rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
793 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400794{
Steven Rostedtbf41a152008-10-04 02:00:59 -0400795 unsigned long addr = (unsigned long)event;
796 unsigned long index;
797
798 index = rb_event_index(event);
799 addr &= PAGE_MASK;
800
801 return cpu_buffer->commit_page->page == (void *)addr &&
802 rb_commit_index(cpu_buffer) == index;
803}
804
805static inline void
806rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
807 struct ring_buffer_event *event)
808{
809 unsigned long addr = (unsigned long)event;
810 unsigned long index;
811
812 index = rb_event_index(event);
813 addr &= PAGE_MASK;
814
815 while (cpu_buffer->commit_page->page != (void *)addr) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500816 if (RB_WARN_ON(cpu_buffer,
817 cpu_buffer->commit_page == cpu_buffer->tail_page))
818 return;
Steven Rostedtabc9b562008-12-02 15:34:06 -0500819 cpu_buffer->commit_page->page->commit =
Steven Rostedtbf41a152008-10-04 02:00:59 -0400820 cpu_buffer->commit_page->write;
821 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
Steven Rostedtabc9b562008-12-02 15:34:06 -0500822 cpu_buffer->write_stamp =
823 cpu_buffer->commit_page->page->time_stamp;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400824 }
825
826 /* Now set the commit to the event's index */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500827 local_set(&cpu_buffer->commit_page->page->commit, index);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400828}
829
830static inline void
831rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
832{
833 /*
834 * We only race with interrupts and NMIs on this CPU.
835 * If we own the commit event, then we can commit
836 * all others that interrupted us, since the interruptions
837 * are in stack format (they finish before they come
838 * back to us). This allows us to do a simple loop to
839 * assign the commit to the tail.
840 */
Steven Rostedta8ccf1d2008-12-23 11:32:24 -0500841 again:
Steven Rostedtbf41a152008-10-04 02:00:59 -0400842 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
Steven Rostedtabc9b562008-12-02 15:34:06 -0500843 cpu_buffer->commit_page->page->commit =
Steven Rostedtbf41a152008-10-04 02:00:59 -0400844 cpu_buffer->commit_page->write;
845 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
Steven Rostedtabc9b562008-12-02 15:34:06 -0500846 cpu_buffer->write_stamp =
847 cpu_buffer->commit_page->page->time_stamp;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400848 /* add barrier to keep gcc from optimizing too much */
849 barrier();
850 }
851 while (rb_commit_index(cpu_buffer) !=
852 rb_page_write(cpu_buffer->commit_page)) {
Steven Rostedtabc9b562008-12-02 15:34:06 -0500853 cpu_buffer->commit_page->page->commit =
Steven Rostedtbf41a152008-10-04 02:00:59 -0400854 cpu_buffer->commit_page->write;
855 barrier();
856 }
Steven Rostedta8ccf1d2008-12-23 11:32:24 -0500857
858 /* again, keep gcc from optimizing */
859 barrier();
860
861 /*
862 * If an interrupt came in just after the first while loop
863 * and pushed the tail page forward, we will be left with
864 * a dangling commit that will never go forward.
865 */
866 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
867 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400868}
869
Steven Rostedtd7690412008-10-01 00:29:53 -0400870static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400871{
Steven Rostedtabc9b562008-12-02 15:34:06 -0500872 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400873 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -0400874}
875
876static inline void rb_inc_iter(struct ring_buffer_iter *iter)
877{
878 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
879
880 /*
881 * The iterator could be on the reader page (it starts there).
882 * But the head could have moved, since the reader was
883 * found. Check for this case and assign the iterator
884 * to the head page instead of next.
885 */
886 if (iter->head_page == cpu_buffer->reader_page)
887 iter->head_page = cpu_buffer->head_page;
888 else
889 rb_inc_page(cpu_buffer, &iter->head_page);
890
Steven Rostedtabc9b562008-12-02 15:34:06 -0500891 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400892 iter->head = 0;
893}
894
895/**
896 * ring_buffer_update_event - update event type and data
 897 * @event: the event to update
898 * @type: the type of event
899 * @length: the size of the event field in the ring buffer
900 *
901 * Update the type and data fields of the event. The length
902 * is the actual size that is written to the ring buffer,
903 * and with this, we can determine what to place into the
904 * data field.
905 */
906static inline void
907rb_update_event(struct ring_buffer_event *event,
908 unsigned type, unsigned length)
909{
910 event->type = type;
911
912 switch (type) {
913
914 case RINGBUF_TYPE_PADDING:
915 break;
916
917 case RINGBUF_TYPE_TIME_EXTEND:
918 event->len =
919 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
920 >> RB_ALIGNMENT_SHIFT;
921 break;
922
923 case RINGBUF_TYPE_TIME_STAMP:
924 event->len =
925 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
926 >> RB_ALIGNMENT_SHIFT;
927 break;
928
929 case RINGBUF_TYPE_DATA:
930 length -= RB_EVNT_HDR_SIZE;
931 if (length > RB_MAX_SMALL_DATA) {
932 event->len = 0;
933 event->array[0] = length;
934 } else
935 event->len =
936 (length + (RB_ALIGNMENT-1))
937 >> RB_ALIGNMENT_SHIFT;
938 break;
939 default:
940 BUG();
941 }
942}
943
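/*
 * Worked example (assuming a 4-byte event header and 4-byte alignment):
 * a 30-byte payload exceeds RB_MAX_SMALL_DATA, so 4 bytes are added for
 * the length word in array[0] plus RB_EVNT_HDR_SIZE for the header,
 * giving 38 bytes, which ALIGN() rounds up to 40.  A 10-byte payload
 * stays small: 10 + 4 = 14, rounded up to 16.
 */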
944static inline unsigned rb_calculate_event_length(unsigned length)
945{
946 struct ring_buffer_event event; /* Used only for sizeof array */
947
948 /* zero length can cause confusions */
949 if (!length)
950 length = 1;
951
952 if (length > RB_MAX_SMALL_DATA)
953 length += sizeof(event.array[0]);
954
955 length += RB_EVNT_HDR_SIZE;
956 length = ALIGN(length, RB_ALIGNMENT);
957
958 return length;
959}
960
961static struct ring_buffer_event *
962__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
963 unsigned type, unsigned long length, u64 *ts)
964{
Steven Rostedt98db8df2008-12-23 11:32:25 -0500965 struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400966 unsigned long tail, write;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400967 struct ring_buffer *buffer = cpu_buffer->buffer;
968 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400969 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400970
Steven Rostedt98db8df2008-12-23 11:32:25 -0500971 commit_page = cpu_buffer->commit_page;
972 /* we just need to protect against interrupts */
973 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400974 tail_page = cpu_buffer->tail_page;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400975 write = local_add_return(length, &tail_page->write);
976 tail = write - length;
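	/*
	 * local_add_return() atomically advanced the write index by
	 * 'length' on this CPU, so [tail, write) is now reserved for this
	 * event.  The only writers that can race with us here are
	 * interrupts and NMIs on this same CPU, and each of them claims
	 * its own disjoint range the same way.
	 */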
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400977
Steven Rostedtbf41a152008-10-04 02:00:59 -0400978 /* See if we shot past the end of this buffer page */
979 if (write > BUF_PAGE_SIZE) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400980 struct buffer_page *next_page = tail_page;
981
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500982 local_irq_save(flags);
983 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400984
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400985 rb_inc_page(cpu_buffer, &next_page);
986
Steven Rostedtd7690412008-10-01 00:29:53 -0400987 head_page = cpu_buffer->head_page;
988 reader_page = cpu_buffer->reader_page;
989
990 /* we grabbed the lock before incrementing */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500991 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
992 goto out_unlock;
Steven Rostedtbf41a152008-10-04 02:00:59 -0400993
994 /*
995 * If for some reason, we had an interrupt storm that made
996 * it all the way around the buffer, bail, and warn
997 * about it.
998 */
Steven Rostedt98db8df2008-12-23 11:32:25 -0500999 if (unlikely(next_page == commit_page)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001000 WARN_ON_ONCE(1);
1001 goto out_unlock;
1002 }
Steven Rostedtd7690412008-10-01 00:29:53 -04001003
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001004 if (next_page == head_page) {
Steven Rostedtd7690412008-10-01 00:29:53 -04001005 if (!(buffer->flags & RB_FL_OVERWRITE)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001006 /* reset write */
1007 if (tail <= BUF_PAGE_SIZE)
1008 local_set(&tail_page->write, tail);
1009 goto out_unlock;
Steven Rostedtd7690412008-10-01 00:29:53 -04001010 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001011
Steven Rostedtbf41a152008-10-04 02:00:59 -04001012 /* tail_page has not moved yet? */
1013 if (tail_page == cpu_buffer->tail_page) {
1014 /* count overflows */
1015 rb_update_overflow(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001016
Steven Rostedtbf41a152008-10-04 02:00:59 -04001017 rb_inc_page(cpu_buffer, &head_page);
1018 cpu_buffer->head_page = head_page;
1019 cpu_buffer->head_page->read = 0;
1020 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001021 }
1022
Steven Rostedtbf41a152008-10-04 02:00:59 -04001023 /*
1024 * If the tail page is still the same as what we think
1025 * it is, then it is up to us to update the tail
1026 * pointer.
1027 */
1028 if (tail_page == cpu_buffer->tail_page) {
1029 local_set(&next_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001030 local_set(&next_page->page->commit, 0);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001031 cpu_buffer->tail_page = next_page;
1032
1033 /* reread the time stamp */
1034 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001035 cpu_buffer->tail_page->page->time_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001036 }
1037
1038 /*
1039 * The actual tail page has moved forward.
1040 */
1041 if (tail < BUF_PAGE_SIZE) {
1042 /* Mark the rest of the page with padding */
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001043 event = __rb_page_index(tail_page, tail);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001044 event->type = RINGBUF_TYPE_PADDING;
1045 }
1046
Steven Rostedtbf41a152008-10-04 02:00:59 -04001047 if (tail <= BUF_PAGE_SIZE)
1048 /* Set the write back to the previous setting */
1049 local_set(&tail_page->write, tail);
1050
1051 /*
1052 * If this was a commit entry that failed,
1053 * increment that too
1054 */
1055 if (tail_page == cpu_buffer->commit_page &&
1056 tail == rb_commit_index(cpu_buffer)) {
1057 rb_set_commit_to_write(cpu_buffer);
1058 }
1059
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001060 __raw_spin_unlock(&cpu_buffer->lock);
1061 local_irq_restore(flags);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001062
1063 /* fail and let the caller try again */
1064 return ERR_PTR(-EAGAIN);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001065 }
1066
Steven Rostedtbf41a152008-10-04 02:00:59 -04001067 /* We reserved something on the buffer */
1068
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001069 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1070 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001071
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001072 event = __rb_page_index(tail_page, tail);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001073 rb_update_event(event, type, length);
1074
Steven Rostedtbf41a152008-10-04 02:00:59 -04001075 /*
1076 * If this is a commit and the tail is zero, then update
1077 * this page's time stamp.
1078 */
1079 if (!tail && rb_is_commit(cpu_buffer, event))
Steven Rostedtabc9b562008-12-02 15:34:06 -05001080 cpu_buffer->commit_page->page->time_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001081
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001082 return event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001083
1084 out_unlock:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001085 __raw_spin_unlock(&cpu_buffer->lock);
1086 local_irq_restore(flags);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001087 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001088}
1089
1090static int
1091rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1092 u64 *ts, u64 *delta)
1093{
1094 struct ring_buffer_event *event;
1095 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001096 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001097
1098 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1099 printk(KERN_WARNING "Delta way too big! %llu"
1100 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +11001101 (unsigned long long)*delta,
1102 (unsigned long long)*ts,
1103 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001104 WARN_ON(1);
1105 }
1106
1107 /*
 1108 * The delta is too big, we need to add a
1109 * new timestamp.
1110 */
1111 event = __rb_reserve_next(cpu_buffer,
1112 RINGBUF_TYPE_TIME_EXTEND,
1113 RB_LEN_TIME_EXTEND,
1114 ts);
1115 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001116 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001117
Steven Rostedtbf41a152008-10-04 02:00:59 -04001118 if (PTR_ERR(event) == -EAGAIN)
1119 return -EAGAIN;
1120
 1121 /* Only a committed time event can update the write stamp */
1122 if (rb_is_commit(cpu_buffer, event)) {
1123 /*
1124 * If this is the first on the page, then we need to
1125 * update the page itself, and just put in a zero.
1126 */
1127 if (rb_event_index(event)) {
1128 event->time_delta = *delta & TS_MASK;
1129 event->array[0] = *delta >> TS_SHIFT;
1130 } else {
Steven Rostedtabc9b562008-12-02 15:34:06 -05001131 cpu_buffer->commit_page->page->time_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001132 event->time_delta = 0;
1133 event->array[0] = 0;
1134 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001135 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001136 /* let the caller know this was the commit */
1137 ret = 1;
1138 } else {
1139 /* Darn, this is just wasted space */
1140 event->time_delta = 0;
1141 event->array[0] = 0;
1142 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001143 }
1144
Steven Rostedtbf41a152008-10-04 02:00:59 -04001145 *delta = 0;
1146
1147 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001148}
1149
1150static struct ring_buffer_event *
1151rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1152 unsigned type, unsigned long length)
1153{
1154 struct ring_buffer_event *event;
1155 u64 ts, delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001156 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001157 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001158
Steven Rostedtbf41a152008-10-04 02:00:59 -04001159 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001160 /*
1161 * We allow for interrupts to reenter here and do a trace.
1162 * If one does, it will cause this original code to loop
1163 * back here. Even with heavy interrupts happening, this
1164 * should only happen a few times in a row. If this happens
1165 * 1000 times in a row, there must be either an interrupt
1166 * storm or we have something buggy.
1167 * Bail!
1168 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001169 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001170 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001171
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001172 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1173
Steven Rostedtbf41a152008-10-04 02:00:59 -04001174 /*
1175 * Only the first commit can update the timestamp.
1176 * Yes there is a race here. If an interrupt comes in
1177 * just after the conditional and it traces too, then it
1178 * will also check the deltas. More than one timestamp may
1179 * also be made. But only the entry that did the actual
1180 * commit will be something other than zero.
1181 */
1182 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1183 rb_page_write(cpu_buffer->tail_page) ==
1184 rb_commit_index(cpu_buffer)) {
1185
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001186 delta = ts - cpu_buffer->write_stamp;
1187
Steven Rostedtbf41a152008-10-04 02:00:59 -04001188 /* make sure this delta is calculated here */
1189 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001190
Steven Rostedtbf41a152008-10-04 02:00:59 -04001191 /* Did the write stamp get updated already? */
1192 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt4143c5c2008-11-10 21:46:01 -05001193 delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001194
1195 if (test_time_stamp(delta)) {
1196
1197 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1198
1199 if (commit == -EBUSY)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001200 return NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001201
1202 if (commit == -EAGAIN)
1203 goto again;
1204
1205 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001206 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001207 } else
1208 /* Non commits have zero deltas */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001209 delta = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001210
1211 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001212 if (PTR_ERR(event) == -EAGAIN)
1213 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001214
Steven Rostedtbf41a152008-10-04 02:00:59 -04001215 if (!event) {
1216 if (unlikely(commit))
1217 /*
 1218 * Ouch! We needed a timestamp and it was committed. But
1219 * we didn't get our event reserved.
1220 */
1221 rb_set_commit_to_write(cpu_buffer);
1222 return NULL;
1223 }
1224
1225 /*
 1226 * If the timestamp was committed, make the commit our entry
1227 * now so that we will update it when needed.
1228 */
1229 if (commit)
1230 rb_set_commit_event(cpu_buffer, event);
1231 else if (!rb_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001232 delta = 0;
1233
1234 event->time_delta = delta;
1235
1236 return event;
1237}
1238
Steven Rostedtbf41a152008-10-04 02:00:59 -04001239static DEFINE_PER_CPU(int, rb_need_resched);
1240
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001241/**
1242 * ring_buffer_lock_reserve - reserve a part of the buffer
1243 * @buffer: the ring buffer to reserve from
1244 * @length: the length of the data to reserve (excluding event header)
1245 * @flags: a pointer to save the interrupt flags
1246 *
 1247 * Returns a reserved event on the ring buffer to copy data directly into.
1248 * The user of this interface will need to get the body to write into
1249 * and can use the ring_buffer_event_data() interface.
1250 *
1251 * The length is the length of the data needed, not the event length
1252 * which also includes the event header.
1253 *
1254 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1255 * If NULL is returned, then nothing has been allocated or locked.
1256 */
1257struct ring_buffer_event *
1258ring_buffer_lock_reserve(struct ring_buffer *buffer,
1259 unsigned long length,
1260 unsigned long *flags)
1261{
1262 struct ring_buffer_per_cpu *cpu_buffer;
1263 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001264 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001265
Steven Rostedt033601a2008-11-21 12:41:55 -05001266 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001267 return NULL;
1268
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001269 if (atomic_read(&buffer->record_disabled))
1270 return NULL;
1271
Steven Rostedtbf41a152008-10-04 02:00:59 -04001272 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001273 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001274
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001275 cpu = raw_smp_processor_id();
1276
1277 if (!cpu_isset(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001278 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001279
1280 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001281
1282 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001283 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001284
1285 length = rb_calculate_event_length(length);
1286 if (length > BUF_PAGE_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001287 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001288
1289 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1290 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001291 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001292
Steven Rostedtbf41a152008-10-04 02:00:59 -04001293 /*
1294 * Need to store resched state on this cpu.
1295 * Only the first needs to.
1296 */
1297
1298 if (preempt_count() == 1)
1299 per_cpu(rb_need_resched, cpu) = resched;
1300
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001301 return event;
1302
Steven Rostedtd7690412008-10-01 00:29:53 -04001303 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001304 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001305 return NULL;
1306}
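/*
 * Sketch of typical use of the reserve/commit pair (illustrative only:
 * "struct my_entry" is a made-up payload type and error handling is
 * elided):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = 42;
 *		ring_buffer_unlock_commit(buffer, event, flags);
 *	}
 */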
1307
1308static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1309 struct ring_buffer_event *event)
1310{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001311 cpu_buffer->entries++;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001312
1313 /* Only process further if we own the commit */
1314 if (!rb_is_commit(cpu_buffer, event))
1315 return;
1316
1317 cpu_buffer->write_stamp += event->time_delta;
1318
1319 rb_set_commit_to_write(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001320}
1321
1322/**
 1323 * ring_buffer_unlock_commit - commit a reserved event
1324 * @buffer: The buffer to commit to
1325 * @event: The event pointer to commit.
1326 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1327 *
1328 * This commits the data to the ring buffer, and releases any locks held.
1329 *
1330 * Must be paired with ring_buffer_lock_reserve.
1331 */
1332int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1333 struct ring_buffer_event *event,
1334 unsigned long flags)
1335{
1336 struct ring_buffer_per_cpu *cpu_buffer;
1337 int cpu = raw_smp_processor_id();
1338
1339 cpu_buffer = buffer->buffers[cpu];
1340
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001341 rb_commit(cpu_buffer, event);
1342
Steven Rostedtbf41a152008-10-04 02:00:59 -04001343 /*
1344 * Only the last preempt count needs to restore preemption.
1345 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001346 if (preempt_count() == 1)
1347 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1348 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001349 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001350
1351 return 0;
1352}
1353
1354/**
1355 * ring_buffer_write - write data to the buffer without reserving
1356 * @buffer: The ring buffer to write to.
1357 * @length: The length of the data being written (excluding the event header)
1358 * @data: The data to write to the buffer.
1359 *
1360 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1361 * one function. If you already have the data to write to the buffer, it
1362 * may be easier to simply call this function.
1363 *
1364 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1365 * and not the length of the event which would hold the header.
1366 */
1367int ring_buffer_write(struct ring_buffer *buffer,
1368 unsigned long length,
1369 void *data)
1370{
1371 struct ring_buffer_per_cpu *cpu_buffer;
1372 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001373 unsigned long event_length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001374 void *body;
1375 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001376 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001377
Steven Rostedt033601a2008-11-21 12:41:55 -05001378 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001379 return -EBUSY;
1380
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001381 if (atomic_read(&buffer->record_disabled))
1382 return -EBUSY;
1383
Steven Rostedt182e9f52008-11-03 23:15:56 -05001384 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001385
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001386 cpu = raw_smp_processor_id();
1387
1388 if (!cpu_isset(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001389 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001390
1391 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001392
1393 if (atomic_read(&cpu_buffer->record_disabled))
1394 goto out;
1395
1396 event_length = rb_calculate_event_length(length);
1397 event = rb_reserve_next_event(cpu_buffer,
1398 RINGBUF_TYPE_DATA, event_length);
1399 if (!event)
1400 goto out;
1401
1402 body = rb_event_data(event);
1403
1404 memcpy(body, data, length);
1405
1406 rb_commit(cpu_buffer, event);
1407
1408 ret = 0;
1409 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001410 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001411
1412 return ret;
1413}
1414
Steven Rostedtbf41a152008-10-04 02:00:59 -04001415static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1416{
1417 struct buffer_page *reader = cpu_buffer->reader_page;
1418 struct buffer_page *head = cpu_buffer->head_page;
1419 struct buffer_page *commit = cpu_buffer->commit_page;
1420
1421 return reader->read == rb_page_commit(reader) &&
1422 (commit == reader ||
1423 (commit == head &&
1424 head->read == rb_page_commit(commit)));
1425}
1426
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001427/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001428 * ring_buffer_record_disable - stop all writes into the buffer
1429 * @buffer: The ring buffer to stop writes to.
1430 *
1431 * This prevents all writes to the buffer. Any attempt to write
1432 * to the buffer after this will fail and return NULL.
1433 *
1434 * The caller should call synchronize_sched() after this.
1435 */
1436void ring_buffer_record_disable(struct ring_buffer *buffer)
1437{
1438 atomic_inc(&buffer->record_disabled);
1439}
1440
1441/**
1442 * ring_buffer_record_enable - enable writes to the buffer
1443 * @buffer: The ring buffer to enable writes
1444 *
1445 * Note, multiple disables will need the same number of enables
 1446 * to truly enable the writing (much like preempt_disable).
1447 */
1448void ring_buffer_record_enable(struct ring_buffer *buffer)
1449{
1450 atomic_dec(&buffer->record_disabled);
1451}
1452
1453/**
1454 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1455 * @buffer: The ring buffer to stop writes to.
1456 * @cpu: The CPU buffer to stop
1457 *
1458 * This prevents all writes to the buffer. Any attempt to write
1459 * to the buffer after this will fail and return NULL.
1460 *
1461 * The caller should call synchronize_sched() after this.
1462 */
1463void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1464{
1465 struct ring_buffer_per_cpu *cpu_buffer;
1466
1467 if (!cpu_isset(cpu, buffer->cpumask))
1468 return;
1469
1470 cpu_buffer = buffer->buffers[cpu];
1471 atomic_inc(&cpu_buffer->record_disabled);
1472}
1473
1474/**
1475 * ring_buffer_record_enable_cpu - enable writes to the buffer
1476 * @buffer: The ring buffer to enable writes
1477 * @cpu: The CPU to enable.
1478 *
1479 * Note, multiple disables will need the same number of enables
 1480 * to truly enable the writing (much like preempt_disable).
1481 */
1482void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1483{
1484 struct ring_buffer_per_cpu *cpu_buffer;
1485
1486 if (!cpu_isset(cpu, buffer->cpumask))
1487 return;
1488
1489 cpu_buffer = buffer->buffers[cpu];
1490 atomic_dec(&cpu_buffer->record_disabled);
1491}
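/*
 * Illustrative sketch (not part of this file): the per-cpu variants nest
 * the same way, and let a reader pause recording on a single CPU while
 * the other CPUs keep tracing.
 */
static void __maybe_unused example_pause_one_cpu(struct ring_buffer *buffer, int cpu)
{
	ring_buffer_record_disable_cpu(buffer, cpu);
	synchronize_sched();

	/* ... read that CPU's events here ... */

	ring_buffer_record_enable_cpu(buffer, cpu);
}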
1492
1493/**
1494 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1495 * @buffer: The ring buffer
1496 * @cpu: The per CPU buffer to get the entries from.
1497 */
1498unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1499{
1500 struct ring_buffer_per_cpu *cpu_buffer;
1501
1502 if (!cpu_isset(cpu, buffer->cpumask))
1503 return 0;
1504
1505 cpu_buffer = buffer->buffers[cpu];
1506 return cpu_buffer->entries;
1507}
1508
1509/**
1510 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1511 * @buffer: The ring buffer
1512 * @cpu: The per CPU buffer to get the number of overruns from
1513 */
1514unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1515{
1516 struct ring_buffer_per_cpu *cpu_buffer;
1517
1518 if (!cpu_isset(cpu, buffer->cpumask))
1519 return 0;
1520
1521 cpu_buffer = buffer->buffers[cpu];
1522 return cpu_buffer->overrun;
1523}
1524
1525/**
1526 * ring_buffer_entries - get the number of entries in a buffer
1527 * @buffer: The ring buffer
1528 *
1529 * Returns the total number of entries in the ring buffer
1530 * (all CPU entries)
1531 */
1532unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1533{
1534 struct ring_buffer_per_cpu *cpu_buffer;
1535 unsigned long entries = 0;
1536 int cpu;
1537
1538 /* if you care about this being correct, lock the buffer */
1539 for_each_buffer_cpu(buffer, cpu) {
1540 cpu_buffer = buffer->buffers[cpu];
1541 entries += cpu_buffer->entries;
1542 }
1543
1544 return entries;
1545}
1546
1547/**
 1548 * ring_buffer_overruns - get the number of overruns in the buffer
1549 * @buffer: The ring buffer
1550 *
1551 * Returns the total number of overruns in the ring buffer
1552 * (all CPU entries)
1553 */
1554unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1555{
1556 struct ring_buffer_per_cpu *cpu_buffer;
1557 unsigned long overruns = 0;
1558 int cpu;
1559
1560 /* if you care about this being correct, lock the buffer */
1561 for_each_buffer_cpu(buffer, cpu) {
1562 cpu_buffer = buffer->buffers[cpu];
1563 overruns += cpu_buffer->overrun;
1564 }
1565
1566 return overruns;
1567}
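/*
 * Illustrative sketch (not part of this file): the per-cpu counters can
 * be summed by hand; CPUs outside the buffer's cpumask simply report
 * zero, so iterating over all possible CPUs is safe.
 */
static void __maybe_unused example_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_possible_cpu(cpu)
		printk(KERN_INFO "cpu%d: %lu entries, %lu overruns\n",
		       cpu, ring_buffer_entries_cpu(buffer, cpu),
		       ring_buffer_overrun_cpu(buffer, cpu));
}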
1568
Steven Rostedt642edba2008-11-12 00:01:26 -05001569static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001570{
1571 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1572
Steven Rostedtd7690412008-10-01 00:29:53 -04001573 /* Iterator usage is expected to have record disabled */
1574 if (list_empty(&cpu_buffer->reader_page->list)) {
1575 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001576 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001577 } else {
1578 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001579 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001580 }
1581 if (iter->head)
1582 iter->read_stamp = cpu_buffer->read_stamp;
1583 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05001584 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05001585}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001586
Steven Rostedt642edba2008-11-12 00:01:26 -05001587/**
1588 * ring_buffer_iter_reset - reset an iterator
1589 * @iter: The iterator to reset
1590 *
1591 * Resets the iterator, so that it will start from the beginning
1592 * again.
1593 */
1594void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1595{
1596 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1597 unsigned long flags;
1598
1599 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1600 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001601 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001602}
1603
1604/**
1605 * ring_buffer_iter_empty - check if an iterator has no more to read
1606 * @iter: The iterator to check
1607 */
1608int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1609{
1610 struct ring_buffer_per_cpu *cpu_buffer;
1611
1612 cpu_buffer = iter->cpu_buffer;
1613
Steven Rostedtbf41a152008-10-04 02:00:59 -04001614 return iter->head_page == cpu_buffer->commit_page &&
1615 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001616}
1617
1618static void
1619rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1620 struct ring_buffer_event *event)
1621{
1622 u64 delta;
1623
1624 switch (event->type) {
1625 case RINGBUF_TYPE_PADDING:
1626 return;
1627
1628 case RINGBUF_TYPE_TIME_EXTEND:
1629 delta = event->array[0];
1630 delta <<= TS_SHIFT;
1631 delta += event->time_delta;
1632 cpu_buffer->read_stamp += delta;
1633 return;
1634
1635 case RINGBUF_TYPE_TIME_STAMP:
1636 /* FIXME: not implemented */
1637 return;
1638
1639 case RINGBUF_TYPE_DATA:
1640 cpu_buffer->read_stamp += event->time_delta;
1641 return;
1642
1643 default:
1644 BUG();
1645 }
1646 return;
1647}
1648
1649static void
1650rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1651 struct ring_buffer_event *event)
1652{
1653 u64 delta;
1654
1655 switch (event->type) {
1656 case RINGBUF_TYPE_PADDING:
1657 return;
1658
1659 case RINGBUF_TYPE_TIME_EXTEND:
1660 delta = event->array[0];
1661 delta <<= TS_SHIFT;
1662 delta += event->time_delta;
1663 iter->read_stamp += delta;
1664 return;
1665
1666 case RINGBUF_TYPE_TIME_STAMP:
1667 /* FIXME: not implemented */
1668 return;
1669
1670 case RINGBUF_TYPE_DATA:
1671 iter->read_stamp += event->time_delta;
1672 return;
1673
1674 default:
1675 BUG();
1676 }
1677 return;
1678}
1679
Steven Rostedtd7690412008-10-01 00:29:53 -04001680static struct buffer_page *
1681rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001682{
Steven Rostedtd7690412008-10-01 00:29:53 -04001683 struct buffer_page *reader = NULL;
1684 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001685 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001686
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001687 local_irq_save(flags);
1688 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04001689
1690 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001691 /*
1692 * This should normally only loop twice. But because the
1693 * start of the reader inserts an empty page, it causes
1694 * a case where we will loop three times. There should be no
1695 * reason to loop four times (that I know of).
1696 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001697 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001698 reader = NULL;
1699 goto out;
1700 }
1701
Steven Rostedtd7690412008-10-01 00:29:53 -04001702 reader = cpu_buffer->reader_page;
1703
1704 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001705 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04001706 goto out;
1707
1708 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001709 if (RB_WARN_ON(cpu_buffer,
1710 cpu_buffer->reader_page->read > rb_page_size(reader)))
1711 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04001712
1713 /* check if we caught up to the tail */
1714 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001715 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04001716 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001717
1718 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04001719 * Splice the empty reader page into the list around the head.
1720 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001721 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001722
Steven Rostedtd7690412008-10-01 00:29:53 -04001723 reader = cpu_buffer->head_page;
1724 cpu_buffer->reader_page->list.next = reader->list.next;
1725 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001726
1727 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001728 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04001729
1730 /* Make the reader page now replace the head */
1731 reader->list.prev->next = &cpu_buffer->reader_page->list;
1732 reader->list.next->prev = &cpu_buffer->reader_page->list;
1733
1734 /*
1735 * If the tail is on the reader, then we must set the head
1736 * to the inserted page, otherwise we set it one before.
1737 */
1738 cpu_buffer->head_page = cpu_buffer->reader_page;
1739
Steven Rostedtbf41a152008-10-04 02:00:59 -04001740 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04001741 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1742
1743 /* Finally update the reader page to the new head */
1744 cpu_buffer->reader_page = reader;
1745 rb_reset_reader_page(cpu_buffer);
1746
1747 goto again;
1748
1749 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001750 __raw_spin_unlock(&cpu_buffer->lock);
1751 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04001752
1753 return reader;
1754}
1755
1756static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1757{
1758 struct ring_buffer_event *event;
1759 struct buffer_page *reader;
1760 unsigned length;
1761
1762 reader = rb_get_reader_page(cpu_buffer);
1763
1764 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001765 if (RB_WARN_ON(cpu_buffer, !reader))
1766 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04001767
1768 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001769
1770 if (event->type == RINGBUF_TYPE_DATA)
1771 cpu_buffer->entries--;
1772
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001773 rb_update_read_stamp(cpu_buffer, event);
1774
Steven Rostedtd7690412008-10-01 00:29:53 -04001775 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001776 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001777}
1778
1779static void rb_advance_iter(struct ring_buffer_iter *iter)
1780{
1781 struct ring_buffer *buffer;
1782 struct ring_buffer_per_cpu *cpu_buffer;
1783 struct ring_buffer_event *event;
1784 unsigned length;
1785
1786 cpu_buffer = iter->cpu_buffer;
1787 buffer = cpu_buffer->buffer;
1788
1789 /*
1790 * Check if we are at the end of the buffer.
1791 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001792 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001793 if (RB_WARN_ON(buffer,
1794 iter->head_page == cpu_buffer->commit_page))
1795 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04001796 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001797 return;
1798 }
1799
1800 event = rb_iter_head_event(iter);
1801
1802 length = rb_event_length(event);
1803
1804 /*
1805 * This should not be called to advance the header if we are
1806 * at the tail of the buffer.
1807 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001808 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05001809 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001810 (iter->head + length > rb_commit_index(cpu_buffer))))
1811 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001812
1813 rb_update_iter_read_stamp(iter, event);
1814
1815 iter->head += length;
1816
1817 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001818 if ((iter->head >= rb_page_size(iter->head_page)) &&
1819 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001820 rb_advance_iter(iter);
1821}
1822
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001823static struct ring_buffer_event *
1824rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001825{
1826 struct ring_buffer_per_cpu *cpu_buffer;
1827 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04001828 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001829 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001830
1831 if (!cpu_isset(cpu, buffer->cpumask))
1832 return NULL;
1833
1834 cpu_buffer = buffer->buffers[cpu];
1835
1836 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001837 /*
1838 * We repeat when a timestamp is encountered. It is possible
1839 * to get multiple timestamps from an interrupt entering just
1840 * as one timestamp is about to be written. The max times
1841 * that this can happen is the number of nested interrupts we
1842 * can have. Nesting 10 deep of interrupts is clearly
1843 * an anomaly.
1844 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001845 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001846 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001847
Steven Rostedtd7690412008-10-01 00:29:53 -04001848 reader = rb_get_reader_page(cpu_buffer);
1849 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001850 return NULL;
1851
Steven Rostedtd7690412008-10-01 00:29:53 -04001852 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001853
1854 switch (event->type) {
1855 case RINGBUF_TYPE_PADDING:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001856 RB_WARN_ON(cpu_buffer, 1);
Steven Rostedtd7690412008-10-01 00:29:53 -04001857 rb_advance_reader(cpu_buffer);
1858 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001859
1860 case RINGBUF_TYPE_TIME_EXTEND:
1861 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04001862 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001863 goto again;
1864
1865 case RINGBUF_TYPE_TIME_STAMP:
1866 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04001867 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001868 goto again;
1869
1870 case RINGBUF_TYPE_DATA:
1871 if (ts) {
1872 *ts = cpu_buffer->read_stamp + event->time_delta;
1873 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1874 }
1875 return event;
1876
1877 default:
1878 BUG();
1879 }
1880
1881 return NULL;
1882}
1883
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001884static struct ring_buffer_event *
1885rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001886{
1887 struct ring_buffer *buffer;
1888 struct ring_buffer_per_cpu *cpu_buffer;
1889 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001890 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001891
1892 if (ring_buffer_iter_empty(iter))
1893 return NULL;
1894
1895 cpu_buffer = iter->cpu_buffer;
1896 buffer = cpu_buffer->buffer;
1897
1898 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001899 /*
1900 * We repeat when a timestamp is encountered. It is possible
1901 * to get multiple timestamps from an interrupt entering just
1902 * as one timestamp is about to be written. The max times
1903 * that this can happen is the number of nested interrupts we
1904 * can have. Nesting 10 deep of interrupts is clearly
1905 * an anomaly.
1906 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001907 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001908 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001909
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001910 if (rb_per_cpu_empty(cpu_buffer))
1911 return NULL;
1912
1913 event = rb_iter_head_event(iter);
1914
1915 switch (event->type) {
1916 case RINGBUF_TYPE_PADDING:
Steven Rostedtd7690412008-10-01 00:29:53 -04001917 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001918 goto again;
1919
1920 case RINGBUF_TYPE_TIME_EXTEND:
1921 /* Internal data, OK to advance */
1922 rb_advance_iter(iter);
1923 goto again;
1924
1925 case RINGBUF_TYPE_TIME_STAMP:
1926 /* FIXME: not implemented */
1927 rb_advance_iter(iter);
1928 goto again;
1929
1930 case RINGBUF_TYPE_DATA:
1931 if (ts) {
1932 *ts = iter->read_stamp + event->time_delta;
1933 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1934 }
1935 return event;
1936
1937 default:
1938 BUG();
1939 }
1940
1941 return NULL;
1942}
1943
1944/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001945 * ring_buffer_peek - peek at the next event to be read
1946 * @buffer: The ring buffer to read
 1947 * @cpu: The cpu to peek at
1948 * @ts: The timestamp counter of this event.
1949 *
1950 * This will return the event that will be read next, but does
1951 * not consume the data.
1952 */
1953struct ring_buffer_event *
1954ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1955{
1956 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1957 struct ring_buffer_event *event;
1958 unsigned long flags;
1959
1960 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1961 event = rb_buffer_peek(buffer, cpu, ts);
1962 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1963
1964 return event;
1965}
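/*
 * Illustrative sketch (not part of this file): peeking reports whether an
 * event is pending (and its normalized timestamp) without consuming it.
 */
static int __maybe_unused example_event_pending(struct ring_buffer *buffer,
						int cpu, u64 *ts)
{
	return ring_buffer_peek(buffer, cpu, ts) != NULL;
}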
1966
1967/**
1968 * ring_buffer_iter_peek - peek at the next event to be read
1969 * @iter: The ring buffer iterator
1970 * @ts: The timestamp counter of this event.
1971 *
1972 * This will return the event that will be read next, but does
1973 * not increment the iterator.
1974 */
1975struct ring_buffer_event *
1976ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1977{
1978 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1979 struct ring_buffer_event *event;
1980 unsigned long flags;
1981
1982 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1983 event = rb_iter_peek(iter, ts);
1984 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1985
1986 return event;
1987}
1988
1989/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001990 * ring_buffer_consume - return an event and consume it
 1991 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's time stamp in (may be NULL)
 1992 *
1993 * Returns the next event in the ring buffer, and that event is consumed.
1994 * Meaning, that sequential reads will keep returning a different event,
1995 * and eventually empty the ring buffer if the producer is slower.
1996 */
1997struct ring_buffer_event *
1998ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1999{
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002000 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002001 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002002 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002003
2004 if (!cpu_isset(cpu, buffer->cpumask))
2005 return NULL;
2006
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002007 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002008
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002009 event = rb_buffer_peek(buffer, cpu, ts);
2010 if (!event)
2011 goto out;
2012
Steven Rostedtd7690412008-10-01 00:29:53 -04002013 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002014
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002015 out:
2016 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2017
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002018 return event;
2019}
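/*
 * Illustrative sketch (not part of this file): a consuming read loop for
 * one CPU.  ring_buffer_event_data() and ring_buffer_event_length() are
 * the event accessors assumed from linux/ring_buffer.h.
 */
static void __maybe_unused example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *body = ring_buffer_event_data(event);

		printk(KERN_DEBUG "ts %llu, %u bytes at %p\n",
		       (unsigned long long)ts,
		       ring_buffer_event_length(event), body);
	}
}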
2020
2021/**
2022 * ring_buffer_read_start - start a non consuming read of the buffer
2023 * @buffer: The ring buffer to read from
2024 * @cpu: The cpu buffer to iterate over
2025 *
2026 * This starts up an iteration through the buffer. It also disables
2027 * the recording to the buffer until the reading is finished.
2028 * This prevents the reading from being corrupted. This is not
2029 * a consuming read, so a producer is not expected.
2030 *
 2031 * Must be paired with ring_buffer_read_finish.
2032 */
2033struct ring_buffer_iter *
2034ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2035{
2036 struct ring_buffer_per_cpu *cpu_buffer;
2037 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002038 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002039
2040 if (!cpu_isset(cpu, buffer->cpumask))
2041 return NULL;
2042
2043 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2044 if (!iter)
2045 return NULL;
2046
2047 cpu_buffer = buffer->buffers[cpu];
2048
2049 iter->cpu_buffer = cpu_buffer;
2050
2051 atomic_inc(&cpu_buffer->record_disabled);
2052 synchronize_sched();
2053
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002054 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002055 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002056 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002057 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002058 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002059
2060 return iter;
2061}
2062
2063/**
 2064 * ring_buffer_read_finish - finish reading the iterator of the buffer
 2065 * @iter: The iterator retrieved by ring_buffer_read_start
2066 *
2067 * This re-enables the recording to the buffer, and frees the
2068 * iterator.
2069 */
2070void
2071ring_buffer_read_finish(struct ring_buffer_iter *iter)
2072{
2073 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2074
2075 atomic_dec(&cpu_buffer->record_disabled);
2076 kfree(iter);
2077}
2078
2079/**
2080 * ring_buffer_read - read the next item in the ring buffer by the iterator
2081 * @iter: The ring buffer iterator
2082 * @ts: The time stamp of the event read.
2083 *
2084 * This reads the next event in the ring buffer and increments the iterator.
2085 */
2086struct ring_buffer_event *
2087ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2088{
2089 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002090 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2091 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002092
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002093 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2094 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002095 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002096 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002097
2098 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002099 out:
2100 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002101
2102 return event;
2103}
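/*
 * Illustrative sketch (not part of this file): a non-consuming walk over
 * one CPU with the iterator API.  Recording on that CPU stays disabled
 * from ring_buffer_read_start() until ring_buffer_read_finish().
 */
static void __maybe_unused example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		printk(KERN_DEBUG "ts %llu\n", (unsigned long long)ts);

	ring_buffer_read_finish(iter);
}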
2104
2105/**
2106 * ring_buffer_size - return the size of the ring buffer (in bytes)
2107 * @buffer: The ring buffer.
2108 */
2109unsigned long ring_buffer_size(struct ring_buffer *buffer)
2110{
2111 return BUF_PAGE_SIZE * buffer->pages;
2112}
2113
2114static void
2115rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2116{
2117 cpu_buffer->head_page
2118 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002119 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002120 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002121
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002122 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002123
2124 cpu_buffer->tail_page = cpu_buffer->head_page;
2125 cpu_buffer->commit_page = cpu_buffer->head_page;
2126
2127 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2128 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002129 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002130 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002131
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002132 cpu_buffer->overrun = 0;
2133 cpu_buffer->entries = 0;
2134}
2135
2136/**
2137 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2138 * @buffer: The ring buffer to reset a per cpu buffer of
2139 * @cpu: The CPU buffer to be reset
2140 */
2141void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2142{
2143 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2144 unsigned long flags;
2145
2146 if (!cpu_isset(cpu, buffer->cpumask))
2147 return;
2148
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002149 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2150
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002151 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002152
2153 rb_reset_cpu(cpu_buffer);
2154
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002155 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002156
2157 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002158}
2159
2160/**
2161 * ring_buffer_reset - reset a ring buffer
2162 * @buffer: The ring buffer to reset all cpu buffers
2163 */
2164void ring_buffer_reset(struct ring_buffer *buffer)
2165{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002166 int cpu;
2167
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002168 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002169 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002170}
2171
2172/**
 2173 * ring_buffer_empty - is the ring buffer empty?
2174 * @buffer: The ring buffer to test
2175 */
2176int ring_buffer_empty(struct ring_buffer *buffer)
2177{
2178 struct ring_buffer_per_cpu *cpu_buffer;
2179 int cpu;
2180
2181 /* yes this is racy, but if you don't like the race, lock the buffer */
2182 for_each_buffer_cpu(buffer, cpu) {
2183 cpu_buffer = buffer->buffers[cpu];
2184 if (!rb_per_cpu_empty(cpu_buffer))
2185 return 0;
2186 }
2187 return 1;
2188}
2189
2190/**
2191 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2192 * @buffer: The ring buffer
2193 * @cpu: The CPU buffer to test
2194 */
2195int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2196{
2197 struct ring_buffer_per_cpu *cpu_buffer;
2198
2199 if (!cpu_isset(cpu, buffer->cpumask))
2200 return 1;
2201
2202 cpu_buffer = buffer->buffers[cpu];
2203 return rb_per_cpu_empty(cpu_buffer);
2204}
2205
2206/**
2207 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2208 * @buffer_a: One buffer to swap with
 2209 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
2210 *
2211 * This function is useful for tracers that want to take a "snapshot"
2212 * of a CPU buffer and has another back up buffer lying around.
2213 * it is expected that the tracer handles the cpu buffer not being
2214 * used at the moment.
2215 */
2216int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2217 struct ring_buffer *buffer_b, int cpu)
2218{
2219 struct ring_buffer_per_cpu *cpu_buffer_a;
2220 struct ring_buffer_per_cpu *cpu_buffer_b;
2221
2222 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2223 !cpu_isset(cpu, buffer_b->cpumask))
2224 return -EINVAL;
2225
2226 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08002227 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002228 return -EINVAL;
2229
2230 cpu_buffer_a = buffer_a->buffers[cpu];
2231 cpu_buffer_b = buffer_b->buffers[cpu];
2232
2233 /*
2234 * We can't do a synchronize_sched here because this
2235 * function can be called in atomic context.
2236 * Normally this will be called from the same CPU as cpu.
2237 * If not it's up to the caller to protect this.
2238 */
2239 atomic_inc(&cpu_buffer_a->record_disabled);
2240 atomic_inc(&cpu_buffer_b->record_disabled);
2241
2242 buffer_a->buffers[cpu] = cpu_buffer_b;
2243 buffer_b->buffers[cpu] = cpu_buffer_a;
2244
2245 cpu_buffer_b->buffer = buffer_a;
2246 cpu_buffer_a->buffer = buffer_b;
2247
2248 atomic_dec(&cpu_buffer_a->record_disabled);
2249 atomic_dec(&cpu_buffer_b->record_disabled);
2250
2251 return 0;
2252}
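/*
 * Illustrative sketch (not part of this file): taking a "snapshot" of one
 * CPU by swapping its live buffer with a spare of the same size, as the
 * comment above describes.
 */
static int __maybe_unused example_snapshot_cpu(struct ring_buffer *live,
					       struct ring_buffer *spare, int cpu)
{
	int ret = ring_buffer_swap_cpu(live, spare, cpu);

	/*
	 * On success 'spare' now holds what 'live' had recorded for this
	 * CPU and can be read at leisure, e.g. with ring_buffer_consume().
	 */
	return ret;
}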
2253
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002254static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -05002255 struct buffer_data_page *bpage)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002256{
2257 struct ring_buffer_event *event;
2258 unsigned long head;
2259
2260 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt044fa782008-12-02 23:50:03 -05002261 for (head = 0; head < local_read(&bpage->commit);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002262 head += rb_event_length(event)) {
2263
Steven Rostedt044fa782008-12-02 23:50:03 -05002264 event = __rb_data_page_index(bpage, head);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002265 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2266 return;
2267 /* Only count data entries */
2268 if (event->type != RINGBUF_TYPE_DATA)
2269 continue;
2270 cpu_buffer->entries--;
2271 }
2272 __raw_spin_unlock(&cpu_buffer->lock);
2273}
2274
2275/**
2276 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2277 * @buffer: the buffer to allocate for.
2278 *
2279 * This function is used in conjunction with ring_buffer_read_page.
2280 * When reading a full page from the ring buffer, these functions
2281 * can be used to speed up the process. The calling function should
2282 * allocate a few pages first with this function. Then when it
2283 * needs to get pages from the ring buffer, it passes the result
2284 * of this function into ring_buffer_read_page, which will swap
2285 * the page that was allocated, with the read page of the buffer.
2286 *
2287 * Returns:
2288 * The page allocated, or NULL on error.
2289 */
2290void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2291{
2292 unsigned long addr;
Steven Rostedt044fa782008-12-02 23:50:03 -05002293 struct buffer_data_page *bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002294
2295 addr = __get_free_page(GFP_KERNEL);
2296 if (!addr)
2297 return NULL;
2298
Steven Rostedt044fa782008-12-02 23:50:03 -05002299 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002300
Steven Rostedt044fa782008-12-02 23:50:03 -05002301 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002302}
2303
2304/**
2305 * ring_buffer_free_read_page - free an allocated read page
 2306 * @buffer: the buffer the page was allocated for
2307 * @data: the page to free
2308 *
2309 * Free a page allocated from ring_buffer_alloc_read_page.
2310 */
2311void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2312{
2313 free_page((unsigned long)data);
2314}
2315
2316/**
2317 * ring_buffer_read_page - extract a page from the ring buffer
2318 * @buffer: buffer to extract from
2319 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2320 * @cpu: the cpu of the buffer to extract
2321 * @full: should the extraction only happen when the page is full.
2322 *
2323 * This function will pull out a page from the ring buffer and consume it.
2324 * @data_page must be the address of the variable that was returned
2325 * from ring_buffer_alloc_read_page. This is because the page might be used
2326 * to swap with a page in the ring buffer.
2327 *
2328 * for example:
 2329 * rpage = ring_buffer_alloc_read_page(buffer);
2330 * if (!rpage)
2331 * return error;
2332 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2333 * if (ret)
2334 * process_page(rpage);
2335 *
2336 * When @full is set, the function will not return true unless
2337 * the writer is off the reader page.
2338 *
2339 * Note: it is up to the calling functions to handle sleeps and wakeups.
2340 * The ring buffer can be used anywhere in the kernel and can not
2341 * blindly call wake_up. The layer that uses the ring buffer must be
2342 * responsible for that.
2343 *
2344 * Returns:
2345 * 1 if data has been transferred
2346 * 0 if no data has been transferred.
2347 */
2348int ring_buffer_read_page(struct ring_buffer *buffer,
2349 void **data_page, int cpu, int full)
2350{
2351 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2352 struct ring_buffer_event *event;
Steven Rostedt044fa782008-12-02 23:50:03 -05002353 struct buffer_data_page *bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002354 unsigned long flags;
2355 int ret = 0;
2356
2357 if (!data_page)
2358 return 0;
2359
Steven Rostedt044fa782008-12-02 23:50:03 -05002360 bpage = *data_page;
2361 if (!bpage)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002362 return 0;
2363
2364 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2365
2366 /*
2367 * rb_buffer_peek will get the next ring buffer if
2368 * the current reader page is empty.
2369 */
2370 event = rb_buffer_peek(buffer, cpu, NULL);
2371 if (!event)
2372 goto out;
2373
2374 /* check for data */
2375 if (!local_read(&cpu_buffer->reader_page->page->commit))
2376 goto out;
2377 /*
2378 * If the writer is already off of the read page, then simply
2379 * switch the read page with the given page. Otherwise
2380 * we need to copy the data from the reader to the writer.
2381 */
2382 if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
 2383 unsigned int read = cpu_buffer->reader_page->read;
 unsigned int commit = local_read(&cpu_buffer->reader_page->page->commit);
 2384
 2385 if (full)
 2386 goto out;
 2387 /* The writer is still on the reader page: copy the unread data */
 2388 /* into the caller's page (bpage still points at *data_page). */
 2389 memcpy(bpage->data,
 2390 cpu_buffer->reader_page->page->data + read,
 2391 commit - read);
 local_set(&bpage->commit, commit - read);
 2392
 2393 /* consume what was read */
 2394 cpu_buffer->reader_page->read = commit;
2395
2396 } else {
2397 /* swap the pages */
Steven Rostedt044fa782008-12-02 23:50:03 -05002398 rb_init_page(bpage);
2399 bpage = cpu_buffer->reader_page->page;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002400 cpu_buffer->reader_page->page = *data_page;
2401 cpu_buffer->reader_page->read = 0;
Steven Rostedt044fa782008-12-02 23:50:03 -05002402 *data_page = bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002403 }
2404 ret = 1;
2405
2406 /* update the entry counter */
Steven Rostedt044fa782008-12-02 23:50:03 -05002407 rb_remove_entries(cpu_buffer, bpage);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002408 out:
2409 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2410
2411 return ret;
2412}
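/*
 * Illustrative sketch (not part of this file): pulling a whole page of
 * events out of the buffer with the alloc/read/free helpers above.
 */
static void __maybe_unused example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page = ring_buffer_alloc_read_page(buffer);

	if (!page)
		return;

	/* last argument 0: do not insist on a completely full page */
	if (ring_buffer_read_page(buffer, &page, cpu, 0))
		printk(KERN_DEBUG "read a page of events from cpu%d\n", cpu);

	ring_buffer_free_read_page(buffer, page);
}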
2413
Steven Rostedta3583242008-11-11 15:01:42 -05002414static ssize_t
2415rb_simple_read(struct file *filp, char __user *ubuf,
2416 size_t cnt, loff_t *ppos)
2417{
Steven Rostedt033601a2008-11-21 12:41:55 -05002418 long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05002419 char buf[64];
2420 int r;
2421
Steven Rostedt033601a2008-11-21 12:41:55 -05002422 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2423 r = sprintf(buf, "permanently disabled\n");
2424 else
2425 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
Steven Rostedta3583242008-11-11 15:01:42 -05002426
2427 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2428}
2429
2430static ssize_t
2431rb_simple_write(struct file *filp, const char __user *ubuf,
2432 size_t cnt, loff_t *ppos)
2433{
Steven Rostedt033601a2008-11-21 12:41:55 -05002434 long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05002435 char buf[64];
2436 long val;
2437 int ret;
2438
2439 if (cnt >= sizeof(buf))
2440 return -EINVAL;
2441
2442 if (copy_from_user(&buf, ubuf, cnt))
2443 return -EFAULT;
2444
2445 buf[cnt] = 0;
2446
2447 ret = strict_strtoul(buf, 10, &val);
2448 if (ret < 0)
2449 return ret;
2450
Steven Rostedt033601a2008-11-21 12:41:55 -05002451 if (val)
2452 set_bit(RB_BUFFERS_ON_BIT, p);
2453 else
2454 clear_bit(RB_BUFFERS_ON_BIT, p);
Steven Rostedta3583242008-11-11 15:01:42 -05002455
2456 (*ppos)++;
2457
2458 return cnt;
2459}
2460
2461static struct file_operations rb_simple_fops = {
2462 .open = tracing_open_generic,
2463 .read = rb_simple_read,
2464 .write = rb_simple_write,
2465};
2466
2467
2468static __init int rb_init_debugfs(void)
2469{
2470 struct dentry *d_tracer;
2471 struct dentry *entry;
2472
2473 d_tracer = tracing_init_dentry();
2474
2475 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
Steven Rostedt033601a2008-11-21 12:41:55 -05002476 &ring_buffer_flags, &rb_simple_fops);
Steven Rostedta3583242008-11-11 15:01:42 -05002477 if (!entry)
2478 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2479
2480 return 0;
2481}
2482
 2483 fs_initcall(rb_init_debugfs);
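/*
 * Illustrative note (not part of this file): the debugfs file created
 * above sits in the tracing directory (typically
 * /sys/kernel/debug/tracing/tracing_on in this era); writing 0 or 1 to it
 * clears or sets RB_BUFFERS_ON_BIT via rb_simple_write().
 */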