/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0       : ring buffers are off
 *   1      0       : ring buffers are on
 *   X      1       : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
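
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * might bracket a noisy code path so it is not recorded, assuming
 * do_noisy_work() is a hypothetical function owned by that caller:
 *
 *	tracing_off();
 *	do_noisy_work();
 *	tracing_on();
 */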

#include "trace.h"

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = sched_clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
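
/*
 * Worked example (illustrative, not from the original file): a data event
 * whose payload fits within RB_MAX_SMALL_DATA keeps its size in the 'len'
 * field in RB_ALIGNMENT (4 byte) units, so len == 3 means 12 bytes of
 * payload. A larger payload sets len to 0 and stores the byte count in
 * array[0], which is why ring_buffer_event_length() below subtracts
 * sizeof(event->array[0]) in that case.
 */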
164
165/**
166 * ring_buffer_event_length - return the length of the event
167 * @event: the event to get the length of
168 */
169unsigned ring_buffer_event_length(struct ring_buffer_event *event)
170{
Robert Richter465634a2009-01-07 15:32:11 +0100171 unsigned length = rb_event_length(event);
172 if (event->type != RINGBUF_TYPE_DATA)
173 return length;
174 length -= RB_EVNT_HDR_SIZE;
175 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
176 length -= sizeof(event->array[0]);
177 return length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400178}
Robert Richterc4f50182008-12-11 16:49:22 +0100179EXPORT_SYMBOL_GPL(ring_buffer_event_length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400180
181/* inline for ring buffer fast paths */
Andrew Morton34a148b2009-01-09 12:27:09 -0800182static void *
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400183rb_event_data(struct ring_buffer_event *event)
184{
185 BUG_ON(event->type != RINGBUF_TYPE_DATA);
186 /* If length is in len field, then array[0] has the data */
187 if (event->len)
188 return (void *)&event->array[0];
189 /* Otherwise length is in array[0] and array[1] has the data */
190 return (void *)&event->array[1];
191}
192
193/**
194 * ring_buffer_event_data - return the data of the event
195 * @event: the event to get the data from
196 */
197void *ring_buffer_event_data(struct ring_buffer_event *event)
198{
199 return rb_event_data(event);
200}
Robert Richterc4f50182008-12-11 16:49:22 +0100201EXPORT_SYMBOL_GPL(ring_buffer_event_data);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400202
203#define for_each_buffer_cpu(buffer, cpu) \
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030204 for_each_cpu(cpu, buffer->cpumask)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400205
206#define TS_SHIFT 27
207#define TS_MASK ((1ULL << TS_SHIFT) - 1)
208#define TS_DELTA_TEST (~TS_MASK)
209
Steven Rostedtabc9b562008-12-02 15:34:06 -0500210struct buffer_data_page {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400211 u64 time_stamp; /* page time stamp */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400212 local_t commit; /* write commited index */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500213 unsigned char data[]; /* data of buffer page */
214};
215
216struct buffer_page {
217 local_t write; /* index for next write */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400218 unsigned read; /* index for next read */
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400219 struct list_head list; /* list of free pages */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500220 struct buffer_data_page *page; /* Actual data page */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400221};
222
Steven Rostedt044fa782008-12-02 23:50:03 -0500223static void rb_init_page(struct buffer_data_page *bpage)
Steven Rostedtabc9b562008-12-02 15:34:06 -0500224{
Steven Rostedt044fa782008-12-02 23:50:03 -0500225 local_set(&bpage->commit, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -0500226}
227
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400228/*
Steven Rostedted568292008-09-29 23:02:40 -0400229 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
230 * this issue out.
231 */
Andrew Morton34a148b2009-01-09 12:27:09 -0800232static void free_buffer_page(struct buffer_page *bpage)
Steven Rostedted568292008-09-29 23:02:40 -0400233{
Andrew Morton34a148b2009-01-09 12:27:09 -0800234 free_page((unsigned long)bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400235 kfree(bpage);
Steven Rostedted568292008-09-29 23:02:40 -0400236}
237
238/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400239 * We need to fit the time_stamp delta into 27 bits.
240 */
241static inline int test_time_stamp(u64 delta)
242{
243 if (delta & TS_DELTA_TEST)
244 return 1;
245 return 0;
246}
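
/*
 * Example (illustrative): the time_delta field of an event only holds
 * TS_SHIFT (27) bits, so any delta of 1 << 27 clock units or more has
 * bits set in TS_DELTA_TEST. test_time_stamp() then returns 1 and the
 * writer must emit a separate RINGBUF_TYPE_TIME_EXTEND event rather
 * than relying on the per-event delta alone.
 */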

#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
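
/*
 * For example (illustrative): with 4K pages, BUF_PAGE_SIZE is 4096 minus
 * the buffer_data_page header (time_stamp and commit), so slightly less
 * than 4K of event data fits on each page.
 */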

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_var_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();


	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	cpumask_copy(buffer->cpumask, cpu_possible_mask);
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
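
/*
 * Illustrative usage sketch (not from the original file), assuming a
 * caller that may sleep and a hypothetical struct my_event it wants to
 * record:
 *
 *	struct ring_buffer *buf;
 *	struct my_event ev = { ... };
 *
 *	buf = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	ring_buffer_write(buf, sizeof(ev), &ev);
 *	ring_buffer_free(buf);
 */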

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);

}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
			mutex_unlock(&buffer->mutex);
			return -1;
		}

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
		mutex_unlock(&buffer->mutex);
		return -1;
	}

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
		mutex_unlock(&buffer->mutex);
		return -1;
	}

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
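
/*
 * Illustrative example (not from the original file): growing a buffer to
 * roughly one megabyte per cpu; the return value is the rounded size
 * actually in use, or a negative value on failure:
 *
 *	if (ring_buffer_resize(buffer, 1 << 20) < 0)
 *		printk(KERN_WARNING "ring buffer resize failed\n");
 */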

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			return;
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		__raw_spin_lock(&cpu_buffer->lock);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
			goto out_unlock;

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;

 out_unlock:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		return NULL;

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed.
			 * But we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
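
/*
 * Illustrative reserve/commit sketch (not from the original file), with
 * my_data standing in for a hypothetical caller-owned payload:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long flags;
 *	void *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(my_data), &flags);
 *	if (event) {
 *		body = ring_buffer_event_data(event);
 *		memcpy(body, &my_data, sizeof(my_data));
 *		ring_buffer_unlock_commit(buffer, event, flags);
 *	}
 */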
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001377
1378/**
1379 * ring_buffer_write - write data to the buffer without reserving
1380 * @buffer: The ring buffer to write to.
1381 * @length: The length of the data being written (excluding the event header)
1382 * @data: The data to write to the buffer.
1383 *
1384 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1385 * one function. If you already have the data to write to the buffer, it
1386 * may be easier to simply call this function.
1387 *
1388 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1389 * and not the length of the event which would hold the header.
1390 */
1391int ring_buffer_write(struct ring_buffer *buffer,
1392 unsigned long length,
1393 void *data)
1394{
1395 struct ring_buffer_per_cpu *cpu_buffer;
1396 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001397 unsigned long event_length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001398 void *body;
1399 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001400 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001401
Steven Rostedt033601a2008-11-21 12:41:55 -05001402 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001403 return -EBUSY;
1404
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001405 if (atomic_read(&buffer->record_disabled))
1406 return -EBUSY;
1407
Steven Rostedt182e9f52008-11-03 23:15:56 -05001408 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001409
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001410 cpu = raw_smp_processor_id();
1411
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301412 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001413 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001414
1415 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001416
1417 if (atomic_read(&cpu_buffer->record_disabled))
1418 goto out;
1419
1420 event_length = rb_calculate_event_length(length);
1421 event = rb_reserve_next_event(cpu_buffer,
1422 RINGBUF_TYPE_DATA, event_length);
1423 if (!event)
1424 goto out;
1425
1426 body = rb_event_data(event);
1427
1428 memcpy(body, data, length);
1429
1430 rb_commit(cpu_buffer, event);
1431
1432 ret = 0;
1433 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001434 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001435
1436 return ret;
1437}
Robert Richterc4f50182008-12-11 16:49:22 +01001438EXPORT_SYMBOL_GPL(ring_buffer_write);
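/*
 * Illustrative sketch (not part of this file): when the payload already
 * exists in memory, ring_buffer_write() replaces the reserve/commit pair
 * shown above.  "buffer" and the "sample" struct are assumptions for the
 * example:
 *
 *	struct { int pid; unsigned long val; } sample = { current->pid, 1 };
 *
 *	if (ring_buffer_write(buffer, sizeof(sample), &sample))
 *		pr_debug("ring buffer busy or disabled\n");
 */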
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001439
Andrew Morton34a148b2009-01-09 12:27:09 -08001440static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001441{
1442 struct buffer_page *reader = cpu_buffer->reader_page;
1443 struct buffer_page *head = cpu_buffer->head_page;
1444 struct buffer_page *commit = cpu_buffer->commit_page;
1445
1446 return reader->read == rb_page_commit(reader) &&
1447 (commit == reader ||
1448 (commit == head &&
1449 head->read == rb_page_commit(commit)));
1450}
1451
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001452/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001453 * ring_buffer_record_disable - stop all writes into the buffer
1454 * @buffer: The ring buffer to stop writes to.
1455 *
1456 * This prevents all writes to the buffer. Any attempt to write
1457 * to the buffer after this will fail and return NULL.
1458 *
1459 * The caller should call synchronize_sched() after this.
1460 */
1461void ring_buffer_record_disable(struct ring_buffer *buffer)
1462{
1463 atomic_inc(&buffer->record_disabled);
1464}
Robert Richterc4f50182008-12-11 16:49:22 +01001465EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001466
1467/**
1468 * ring_buffer_record_enable - enable writes to the buffer
1469 * @buffer: The ring buffer to enable writes
1470 *
1471 * Note, multiple disables will need the same number of enables
1472 * to truly enable the writing (much like preempt_disable).
1473 */
1474void ring_buffer_record_enable(struct ring_buffer *buffer)
1475{
1476 atomic_dec(&buffer->record_disabled);
1477}
Robert Richterc4f50182008-12-11 16:49:22 +01001478EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
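/*
 * Illustrative sketch (not part of this file): a reader that wants a stable
 * view can bracket its work with the disable/enable pair.  The calls nest,
 * so each disable needs a matching enable.  "buffer" is assumed to be a
 * valid ring buffer:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();		(let in-flight writers finish)
 *	... read or reset the buffer ...
 *	ring_buffer_record_enable(buffer);
 */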
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001479
1480/**
1481 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1482 * @buffer: The ring buffer to stop writes to.
1483 * @cpu: The CPU buffer to stop
1484 *
1485 * This prevents all writes to the buffer. Any attempt to write
1486 * to the buffer after this will fail and return NULL.
1487 *
1488 * The caller should call synchronize_sched() after this.
1489 */
1490void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1491{
1492 struct ring_buffer_per_cpu *cpu_buffer;
1493
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301494 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001495 return;
1496
1497 cpu_buffer = buffer->buffers[cpu];
1498 atomic_inc(&cpu_buffer->record_disabled);
1499}
Robert Richterc4f50182008-12-11 16:49:22 +01001500EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001501
1502/**
1503 * ring_buffer_record_enable_cpu - enable writes to the buffer
1504 * @buffer: The ring buffer to enable writes
1505 * @cpu: The CPU to enable.
1506 *
1507 * Note, multiple disables will need the same number of enables
1508 * to truly enable the writing (much like preempt_disable).
1509 */
1510void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1511{
1512 struct ring_buffer_per_cpu *cpu_buffer;
1513
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301514 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001515 return;
1516
1517 cpu_buffer = buffer->buffers[cpu];
1518 atomic_dec(&cpu_buffer->record_disabled);
1519}
Robert Richterc4f50182008-12-11 16:49:22 +01001520EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001521
1522/**
1523 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1524 * @buffer: The ring buffer
1525 * @cpu: The per CPU buffer to get the entries from.
1526 */
1527unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1528{
1529 struct ring_buffer_per_cpu *cpu_buffer;
1530
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301531 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001532 return 0;
1533
1534 cpu_buffer = buffer->buffers[cpu];
1535 return cpu_buffer->entries;
1536}
Robert Richterc4f50182008-12-11 16:49:22 +01001537EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001538
1539/**
1540 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1541 * @buffer: The ring buffer
1542 * @cpu: The per CPU buffer to get the number of overruns from
1543 */
1544unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1545{
1546 struct ring_buffer_per_cpu *cpu_buffer;
1547
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301548 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001549 return 0;
1550
1551 cpu_buffer = buffer->buffers[cpu];
1552 return cpu_buffer->overrun;
1553}
Robert Richterc4f50182008-12-11 16:49:22 +01001554EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001555
1556/**
1557 * ring_buffer_entries - get the number of entries in a buffer
1558 * @buffer: The ring buffer
1559 *
1560 * Returns the total number of entries in the ring buffer
1561 * (all CPU entries)
1562 */
1563unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1564{
1565 struct ring_buffer_per_cpu *cpu_buffer;
1566 unsigned long entries = 0;
1567 int cpu;
1568
1569 /* if you care about this being correct, lock the buffer */
1570 for_each_buffer_cpu(buffer, cpu) {
1571 cpu_buffer = buffer->buffers[cpu];
1572 entries += cpu_buffer->entries;
1573 }
1574
1575 return entries;
1576}
Robert Richterc4f50182008-12-11 16:49:22 +01001577EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001578
1579/**
1580 * ring_buffer_overruns - get the number of overruns in the buffer
1581 * @buffer: The ring buffer
1582 *
1583 * Returns the total number of overruns in the ring buffer
1584 * (all CPU entries)
1585 */
1586unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1587{
1588 struct ring_buffer_per_cpu *cpu_buffer;
1589 unsigned long overruns = 0;
1590 int cpu;
1591
1592 /* if you care about this being correct, lock the buffer */
1593 for_each_buffer_cpu(buffer, cpu) {
1594 cpu_buffer = buffer->buffers[cpu];
1595 overruns += cpu_buffer->overrun;
1596 }
1597
1598 return overruns;
1599}
Robert Richterc4f50182008-12-11 16:49:22 +01001600EXPORT_SYMBOL_GPL(ring_buffer_overruns);
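/*
 * Illustrative sketch (not part of this file): the per-cpu and global
 * counters above can be combined into a simple usage report.  "buffer" is
 * assumed to be a valid ring buffer:
 *
 *	int cpu;
 *
 *	pr_info("total: %lu entries, %lu overruns\n",
 *		ring_buffer_entries(buffer), ring_buffer_overruns(buffer));
 *	for_each_online_cpu(cpu)
 *		pr_info("cpu%d: %lu entries, %lu overruns\n", cpu,
 *			ring_buffer_entries_cpu(buffer, cpu),
 *			ring_buffer_overrun_cpu(buffer, cpu));
 */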
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001601
Steven Rostedt642edba2008-11-12 00:01:26 -05001602static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001603{
1604 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1605
Steven Rostedtd7690412008-10-01 00:29:53 -04001606 /* Iterator usage is expected to have record disabled */
1607 if (list_empty(&cpu_buffer->reader_page->list)) {
1608 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001609 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001610 } else {
1611 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001612 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001613 }
1614 if (iter->head)
1615 iter->read_stamp = cpu_buffer->read_stamp;
1616 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05001617 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05001618}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001619
Steven Rostedt642edba2008-11-12 00:01:26 -05001620/**
1621 * ring_buffer_iter_reset - reset an iterator
1622 * @iter: The iterator to reset
1623 *
1624 * Resets the iterator, so that it will start from the beginning
1625 * again.
1626 */
1627void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1628{
1629 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1630 unsigned long flags;
1631
1632 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1633 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001634 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001635}
Robert Richterc4f50182008-12-11 16:49:22 +01001636EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001637
1638/**
1639 * ring_buffer_iter_empty - check if an iterator has no more to read
1640 * @iter: The iterator to check
1641 */
1642int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1643{
1644 struct ring_buffer_per_cpu *cpu_buffer;
1645
1646 cpu_buffer = iter->cpu_buffer;
1647
Steven Rostedtbf41a152008-10-04 02:00:59 -04001648 return iter->head_page == cpu_buffer->commit_page &&
1649 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001650}
Robert Richterc4f50182008-12-11 16:49:22 +01001651EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001652
1653static void
1654rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1655 struct ring_buffer_event *event)
1656{
1657 u64 delta;
1658
1659 switch (event->type) {
1660 case RINGBUF_TYPE_PADDING:
1661 return;
1662
1663 case RINGBUF_TYPE_TIME_EXTEND:
1664 delta = event->array[0];
1665 delta <<= TS_SHIFT;
1666 delta += event->time_delta;
1667 cpu_buffer->read_stamp += delta;
1668 return;
1669
1670 case RINGBUF_TYPE_TIME_STAMP:
1671 /* FIXME: not implemented */
1672 return;
1673
1674 case RINGBUF_TYPE_DATA:
1675 cpu_buffer->read_stamp += event->time_delta;
1676 return;
1677
1678 default:
1679 BUG();
1680 }
1681 return;
1682}
1683
1684static void
1685rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1686 struct ring_buffer_event *event)
1687{
1688 u64 delta;
1689
1690 switch (event->type) {
1691 case RINGBUF_TYPE_PADDING:
1692 return;
1693
1694 case RINGBUF_TYPE_TIME_EXTEND:
1695 delta = event->array[0];
1696 delta <<= TS_SHIFT;
1697 delta += event->time_delta;
1698 iter->read_stamp += delta;
1699 return;
1700
1701 case RINGBUF_TYPE_TIME_STAMP:
1702 /* FIXME: not implemented */
1703 return;
1704
1705 case RINGBUF_TYPE_DATA:
1706 iter->read_stamp += event->time_delta;
1707 return;
1708
1709 default:
1710 BUG();
1711 }
1712 return;
1713}
1714
Steven Rostedtd7690412008-10-01 00:29:53 -04001715static struct buffer_page *
1716rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001717{
Steven Rostedtd7690412008-10-01 00:29:53 -04001718 struct buffer_page *reader = NULL;
1719 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001720 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001721
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001722 local_irq_save(flags);
1723 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04001724
1725 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001726 /*
1727 * This should normally only loop twice. But because the
1728 * start of the reader inserts an empty page, it causes
1729 * a case where we will loop three times. There should be no
1730 * reason to loop four times (that I know of).
1731 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001732 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001733 reader = NULL;
1734 goto out;
1735 }
1736
Steven Rostedtd7690412008-10-01 00:29:53 -04001737 reader = cpu_buffer->reader_page;
1738
1739 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001740 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04001741 goto out;
1742
1743 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001744 if (RB_WARN_ON(cpu_buffer,
1745 cpu_buffer->reader_page->read > rb_page_size(reader)))
1746 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04001747
1748 /* check if we caught up to the tail */
1749 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001750 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04001751 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001752
1753 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04001754 * Splice the empty reader page into the list around the head.
1755 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001756 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001757
Steven Rostedtd7690412008-10-01 00:29:53 -04001758 reader = cpu_buffer->head_page;
1759 cpu_buffer->reader_page->list.next = reader->list.next;
1760 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001761
1762 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001763 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04001764
1765 /* Make the reader page now replace the head */
1766 reader->list.prev->next = &cpu_buffer->reader_page->list;
1767 reader->list.next->prev = &cpu_buffer->reader_page->list;
1768
1769 /*
1770 * If the tail is on the reader, then we must set the head
1771 * to the inserted page, otherwise we set it one before.
1772 */
1773 cpu_buffer->head_page = cpu_buffer->reader_page;
1774
Steven Rostedtbf41a152008-10-04 02:00:59 -04001775 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04001776 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1777
1778 /* Finally update the reader page to the new head */
1779 cpu_buffer->reader_page = reader;
1780 rb_reset_reader_page(cpu_buffer);
1781
1782 goto again;
1783
1784 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001785 __raw_spin_unlock(&cpu_buffer->lock);
1786 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04001787
1788 return reader;
1789}
1790
1791static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1792{
1793 struct ring_buffer_event *event;
1794 struct buffer_page *reader;
1795 unsigned length;
1796
1797 reader = rb_get_reader_page(cpu_buffer);
1798
1799 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001800 if (RB_WARN_ON(cpu_buffer, !reader))
1801 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04001802
1803 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001804
1805 if (event->type == RINGBUF_TYPE_DATA)
1806 cpu_buffer->entries--;
1807
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001808 rb_update_read_stamp(cpu_buffer, event);
1809
Steven Rostedtd7690412008-10-01 00:29:53 -04001810 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001811 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001812}
1813
1814static void rb_advance_iter(struct ring_buffer_iter *iter)
1815{
1816 struct ring_buffer *buffer;
1817 struct ring_buffer_per_cpu *cpu_buffer;
1818 struct ring_buffer_event *event;
1819 unsigned length;
1820
1821 cpu_buffer = iter->cpu_buffer;
1822 buffer = cpu_buffer->buffer;
1823
1824 /*
1825 * Check if we are at the end of the buffer.
1826 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001827 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001828 if (RB_WARN_ON(buffer,
1829 iter->head_page == cpu_buffer->commit_page))
1830 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04001831 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001832 return;
1833 }
1834
1835 event = rb_iter_head_event(iter);
1836
1837 length = rb_event_length(event);
1838
1839 /*
1840 * This should not be called to advance the header if we are
1841 * at the tail of the buffer.
1842 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001843 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05001844 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001845 (iter->head + length > rb_commit_index(cpu_buffer))))
1846 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001847
1848 rb_update_iter_read_stamp(iter, event);
1849
1850 iter->head += length;
1851
1852 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001853 if ((iter->head >= rb_page_size(iter->head_page)) &&
1854 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001855 rb_advance_iter(iter);
1856}
1857
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001858static struct ring_buffer_event *
1859rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001860{
1861 struct ring_buffer_per_cpu *cpu_buffer;
1862 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04001863 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001864 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001865
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301866 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001867 return NULL;
1868
1869 cpu_buffer = buffer->buffers[cpu];
1870
1871 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001872 /*
1873 * We repeat when a timestamp is encountered. It is possible
1874 * to get multiple timestamps from an interrupt entering just
1875 * as one timestamp is about to be written. The max times
1876 * that this can happen is the number of nested interrupts we
1877 * can have. Nesting 10 deep of interrupts is clearly
1878 * an anomaly.
1879 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001880 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001881 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001882
Steven Rostedtd7690412008-10-01 00:29:53 -04001883 reader = rb_get_reader_page(cpu_buffer);
1884 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001885 return NULL;
1886
Steven Rostedtd7690412008-10-01 00:29:53 -04001887 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001888
1889 switch (event->type) {
1890 case RINGBUF_TYPE_PADDING:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001891 RB_WARN_ON(cpu_buffer, 1);
Steven Rostedtd7690412008-10-01 00:29:53 -04001892 rb_advance_reader(cpu_buffer);
1893 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001894
1895 case RINGBUF_TYPE_TIME_EXTEND:
1896 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04001897 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001898 goto again;
1899
1900 case RINGBUF_TYPE_TIME_STAMP:
1901 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04001902 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001903 goto again;
1904
1905 case RINGBUF_TYPE_DATA:
1906 if (ts) {
1907 *ts = cpu_buffer->read_stamp + event->time_delta;
1908 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1909 }
1910 return event;
1911
1912 default:
1913 BUG();
1914 }
1915
1916 return NULL;
1917}
Robert Richterc4f50182008-12-11 16:49:22 +01001918EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001919
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001920static struct ring_buffer_event *
1921rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001922{
1923 struct ring_buffer *buffer;
1924 struct ring_buffer_per_cpu *cpu_buffer;
1925 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001926 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001927
1928 if (ring_buffer_iter_empty(iter))
1929 return NULL;
1930
1931 cpu_buffer = iter->cpu_buffer;
1932 buffer = cpu_buffer->buffer;
1933
1934 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001935 /*
1936 * We repeat when a timestamp is encountered. It is possible
1937 * to get multiple timestamps from an interrupt entering just
1938 * as one timestamp is about to be written. The max times
1939 * that this can happen is the number of nested interrupts we
1940 * can have. Nesting 10 deep of interrupts is clearly
1941 * an anomaly.
1942 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001943 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001944 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001945
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001946 if (rb_per_cpu_empty(cpu_buffer))
1947 return NULL;
1948
1949 event = rb_iter_head_event(iter);
1950
1951 switch (event->type) {
1952 case RINGBUF_TYPE_PADDING:
Steven Rostedtd7690412008-10-01 00:29:53 -04001953 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001954 goto again;
1955
1956 case RINGBUF_TYPE_TIME_EXTEND:
1957 /* Internal data, OK to advance */
1958 rb_advance_iter(iter);
1959 goto again;
1960
1961 case RINGBUF_TYPE_TIME_STAMP:
1962 /* FIXME: not implemented */
1963 rb_advance_iter(iter);
1964 goto again;
1965
1966 case RINGBUF_TYPE_DATA:
1967 if (ts) {
1968 *ts = iter->read_stamp + event->time_delta;
1969 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1970 }
1971 return event;
1972
1973 default:
1974 BUG();
1975 }
1976
1977 return NULL;
1978}
Robert Richterc4f50182008-12-11 16:49:22 +01001979EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001980
1981/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001982 * ring_buffer_peek - peek at the next event to be read
1983 * @buffer: The ring buffer to read
1984 * @cpu: The cpu to peek at
1985 * @ts: The timestamp counter of this event.
1986 *
1987 * This will return the event that will be read next, but does
1988 * not consume the data.
1989 */
1990struct ring_buffer_event *
1991ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1992{
1993 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1994 struct ring_buffer_event *event;
1995 unsigned long flags;
1996
1997 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1998 event = rb_buffer_peek(buffer, cpu, ts);
1999 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2000
2001 return event;
2002}
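/*
 * Illustrative sketch (not part of this file): peeking lets a reader look at
 * the next event without consuming it, for example to check its timestamp
 * before deciding whether to read it.  "buffer", "cpu" and the caller-chosen
 * "deadline" are assumptions for the example:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event && ts < deadline)
 *		event = ring_buffer_consume(buffer, cpu, &ts);
 */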
2003
2004/**
2005 * ring_buffer_iter_peek - peek at the next event to be read
2006 * @iter: The ring buffer iterator
2007 * @ts: The timestamp counter of this event.
2008 *
2009 * This will return the event that will be read next, but does
2010 * not increment the iterator.
2011 */
2012struct ring_buffer_event *
2013ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2014{
2015 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2016 struct ring_buffer_event *event;
2017 unsigned long flags;
2018
2019 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2020 event = rb_iter_peek(iter, ts);
2021 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2022
2023 return event;
2024}
2025
2026/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002027 * ring_buffer_consume - return an event and consume it
2028 * @buffer: The ring buffer to get the next event from
2029 *
2030 * Returns the next event in the ring buffer, and that event is consumed.
2031 * This means that sequential reads will keep returning a different event,
2032 * and eventually empty the ring buffer if the producer is slower.
2033 */
2034struct ring_buffer_event *
2035ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2036{
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002037 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002038 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002039 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002040
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302041 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002042 return NULL;
2043
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002044 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002045
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002046 event = rb_buffer_peek(buffer, cpu, ts);
2047 if (!event)
2048 goto out;
2049
Steven Rostedtd7690412008-10-01 00:29:53 -04002050 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002051
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002052 out:
2053 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2054
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002055 return event;
2056}
Robert Richterc4f50182008-12-11 16:49:22 +01002057EXPORT_SYMBOL_GPL(ring_buffer_consume);
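/*
 * Illustrative sketch (not part of this file): draining one cpu buffer with
 * consuming reads.  "buffer", "cpu" and process_event() are assumptions for
 * the example:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
 *		process_event(ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event), ts);
 */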
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002058
2059/**
2060 * ring_buffer_read_start - start a non-consuming read of the buffer
2061 * @buffer: The ring buffer to read from
2062 * @cpu: The cpu buffer to iterate over
2063 *
2064 * This starts up an iteration through the buffer. It also disables
2065 * the recording to the buffer until the reading is finished.
2066 * This prevents the reading from being corrupted. This is not
2067 * a consuming read, so a producer is not expected.
2068 *
2069 * Must be paired with ring_buffer_read_finish.
2070 */
2071struct ring_buffer_iter *
2072ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2073{
2074 struct ring_buffer_per_cpu *cpu_buffer;
2075 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002076 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002077
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302078 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002079 return NULL;
2080
2081 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2082 if (!iter)
2083 return NULL;
2084
2085 cpu_buffer = buffer->buffers[cpu];
2086
2087 iter->cpu_buffer = cpu_buffer;
2088
2089 atomic_inc(&cpu_buffer->record_disabled);
2090 synchronize_sched();
2091
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002092 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002093 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002094 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002095 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002096 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002097
2098 return iter;
2099}
Robert Richterc4f50182008-12-11 16:49:22 +01002100EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002101
2102/**
2103 * ring_buffer_read_finish - finish reading the iterator of the buffer
2104 * @iter: The iterator retrieved by ring_buffer_read_start
2105 *
2106 * This re-enables the recording to the buffer, and frees the
2107 * iterator.
2108 */
2109void
2110ring_buffer_read_finish(struct ring_buffer_iter *iter)
2111{
2112 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2113
2114 atomic_dec(&cpu_buffer->record_disabled);
2115 kfree(iter);
2116}
Robert Richterc4f50182008-12-11 16:49:22 +01002117EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002118
2119/**
2120 * ring_buffer_read - read the next item in the ring buffer by the iterator
2121 * @iter: The ring buffer iterator
2122 * @ts: The time stamp of the event read.
2123 *
2124 * This reads the next event in the ring buffer and increments the iterator.
2125 */
2126struct ring_buffer_event *
2127ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2128{
2129 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002130 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2131 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002132
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002133 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2134 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002135 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002136 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002137
2138 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002139 out:
2140 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002141
2142 return event;
2143}
Robert Richterc4f50182008-12-11 16:49:22 +01002144EXPORT_SYMBOL_GPL(ring_buffer_read);
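/*
 * Illustrative sketch (not part of this file): a full non-consuming pass
 * over one cpu buffer using the iterator API above.  "buffer", "cpu" and
 * print_event() are assumptions for the example:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		print_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */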
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002145
2146/**
2147 * ring_buffer_size - return the size of the ring buffer (in bytes)
2148 * @buffer: The ring buffer.
2149 */
2150unsigned long ring_buffer_size(struct ring_buffer *buffer)
2151{
2152 return BUF_PAGE_SIZE * buffer->pages;
2153}
Robert Richterc4f50182008-12-11 16:49:22 +01002154EXPORT_SYMBOL_GPL(ring_buffer_size);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002155
2156static void
2157rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2158{
2159 cpu_buffer->head_page
2160 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002161 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002162 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002163
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002164 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002165
2166 cpu_buffer->tail_page = cpu_buffer->head_page;
2167 cpu_buffer->commit_page = cpu_buffer->head_page;
2168
2169 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2170 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002171 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002172 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002173
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002174 cpu_buffer->overrun = 0;
2175 cpu_buffer->entries = 0;
2176}
2177
2178/**
2179 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2180 * @buffer: The ring buffer to reset a per cpu buffer of
2181 * @cpu: The CPU buffer to be reset
2182 */
2183void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2184{
2185 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2186 unsigned long flags;
2187
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302188 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002189 return;
2190
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002191 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2192
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002193 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002194
2195 rb_reset_cpu(cpu_buffer);
2196
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002197 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002198
2199 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002200}
Robert Richterc4f50182008-12-11 16:49:22 +01002201EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002202
2203/**
2204 * ring_buffer_reset - reset a ring buffer
2205 * @buffer: The ring buffer to reset all cpu buffers
2206 */
2207void ring_buffer_reset(struct ring_buffer *buffer)
2208{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002209 int cpu;
2210
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002211 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002212 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002213}
Robert Richterc4f50182008-12-11 16:49:22 +01002214EXPORT_SYMBOL_GPL(ring_buffer_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002215
2216/**
2217 * ring_buffer_empty - is the ring buffer empty?
2218 * @buffer: The ring buffer to test
2219 */
2220int ring_buffer_empty(struct ring_buffer *buffer)
2221{
2222 struct ring_buffer_per_cpu *cpu_buffer;
2223 int cpu;
2224
2225 /* yes this is racy, but if you don't like the race, lock the buffer */
2226 for_each_buffer_cpu(buffer, cpu) {
2227 cpu_buffer = buffer->buffers[cpu];
2228 if (!rb_per_cpu_empty(cpu_buffer))
2229 return 0;
2230 }
2231 return 1;
2232}
Robert Richterc4f50182008-12-11 16:49:22 +01002233EXPORT_SYMBOL_GPL(ring_buffer_empty);
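/*
 * Illustrative sketch (not part of this file): because the emptiness check
 * is racy, callers typically use it only as a hint, for example to avoid a
 * needless wakeup.  "buffer" and the waitqueue "trace_wait" are assumptions
 * for the example:
 *
 *	if (!ring_buffer_empty(buffer))
 *		wake_up_interruptible(&trace_wait);
 */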
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002234
2235/**
2236 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2237 * @buffer: The ring buffer
2238 * @cpu: The CPU buffer to test
2239 */
2240int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2241{
2242 struct ring_buffer_per_cpu *cpu_buffer;
2243
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302244 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002245 return 1;
2246
2247 cpu_buffer = buffer->buffers[cpu];
2248 return rb_per_cpu_empty(cpu_buffer);
2249}
Robert Richterc4f50182008-12-11 16:49:22 +01002250EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002251
2252/**
2253 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2254 * @buffer_a: One buffer to swap with
2255 * @buffer_b: The other buffer to swap with
2256 *
2257 * This function is useful for tracers that want to take a "snapshot"
2258 * of a CPU buffer and have another backup buffer lying around.
2259 * It is expected that the tracer handles the cpu buffer not being
2260 * used at the moment.
2261 */
2262int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2263 struct ring_buffer *buffer_b, int cpu)
2264{
2265 struct ring_buffer_per_cpu *cpu_buffer_a;
2266 struct ring_buffer_per_cpu *cpu_buffer_b;
2267
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302268 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2269 !cpumask_test_cpu(cpu, buffer_b->cpumask))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002270 return -EINVAL;
2271
2272 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08002273 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002274 return -EINVAL;
2275
2276 cpu_buffer_a = buffer_a->buffers[cpu];
2277 cpu_buffer_b = buffer_b->buffers[cpu];
2278
2279 /*
2280 * We can't do a synchronize_sched here because this
2281 * function can be called in atomic context.
2282 * Normally this will be called from the same CPU as cpu.
2283 * If not it's up to the caller to protect this.
2284 */
2285 atomic_inc(&cpu_buffer_a->record_disabled);
2286 atomic_inc(&cpu_buffer_b->record_disabled);
2287
2288 buffer_a->buffers[cpu] = cpu_buffer_b;
2289 buffer_b->buffers[cpu] = cpu_buffer_a;
2290
2291 cpu_buffer_b->buffer = buffer_a;
2292 cpu_buffer_a->buffer = buffer_b;
2293
2294 atomic_dec(&cpu_buffer_a->record_disabled);
2295 atomic_dec(&cpu_buffer_b->record_disabled);
2296
2297 return 0;
2298}
Robert Richterc4f50182008-12-11 16:49:22 +01002299EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
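/*
 * Illustrative sketch (not part of this file): a "max latency" style
 * snapshot swaps the live cpu buffer with a spare one of the same size.
 * "trace_buf" and "snapshot_buf" are assumed to have been allocated with
 * the same number of pages:
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(trace_buf, snapshot_buf, cpu);
 *	if (!err)
 *		... old live data is now readable from snapshot_buf,
 *		    while new events keep flowing into trace_buf ...
 */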
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002300
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002301static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -05002302 struct buffer_data_page *bpage)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002303{
2304 struct ring_buffer_event *event;
2305 unsigned long head;
2306
2307 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt044fa782008-12-02 23:50:03 -05002308 for (head = 0; head < local_read(&bpage->commit);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002309 head += rb_event_length(event)) {
2310
Steven Rostedt044fa782008-12-02 23:50:03 -05002311 event = __rb_data_page_index(bpage, head);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002312 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2313 return;
2314 /* Only count data entries */
2315 if (event->type != RINGBUF_TYPE_DATA)
2316 continue;
2317 cpu_buffer->entries--;
2318 }
2319 __raw_spin_unlock(&cpu_buffer->lock);
2320}
2321
2322/**
2323 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2324 * @buffer: the buffer to allocate for.
2325 *
2326 * This function is used in conjunction with ring_buffer_read_page.
2327 * When reading a full page from the ring buffer, these functions
2328 * can be used to speed up the process. The calling function should
2329 * allocate a few pages first with this function. Then when it
2330 * needs to get pages from the ring buffer, it passes the result
2331 * of this function into ring_buffer_read_page, which will swap
2332 * the page that was allocated, with the read page of the buffer.
2333 *
2334 * Returns:
2335 * The page allocated, or NULL on error.
2336 */
2337void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2338{
2339 unsigned long addr;
Steven Rostedt044fa782008-12-02 23:50:03 -05002340 struct buffer_data_page *bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002341
2342 addr = __get_free_page(GFP_KERNEL);
2343 if (!addr)
2344 return NULL;
2345
Steven Rostedt044fa782008-12-02 23:50:03 -05002346 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002347
Steven Rostedt044fa782008-12-02 23:50:03 -05002348 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002349}
2350
2351/**
2352 * ring_buffer_free_read_page - free an allocated read page
2353 * @buffer: the buffer the page was allocated for
2354 * @data: the page to free
2355 *
2356 * Free a page allocated from ring_buffer_alloc_read_page.
2357 */
2358void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2359{
2360 free_page((unsigned long)data);
2361}
2362
2363/**
2364 * ring_buffer_read_page - extract a page from the ring buffer
2365 * @buffer: buffer to extract from
2366 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2367 * @cpu: the cpu of the buffer to extract
2368 * @full: should the extraction only happen when the page is full.
2369 *
2370 * This function will pull out a page from the ring buffer and consume it.
2371 * @data_page must be the address of the variable that was returned
2372 * from ring_buffer_alloc_read_page. This is because the page might be used
2373 * to swap with a page in the ring buffer.
2374 *
2375 * for example:
2376 * rpage = ring_buffer_alloc_read_page(buffer);
2377 * if (!rpage)
2378 * return error;
2379 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2380 * if (ret)
2381 * process_page(rpage);
2382 *
2383 * When @full is set, the function will not return true unless
2384 * the writer is off the reader page.
2385 *
2386 * Note: it is up to the calling functions to handle sleeps and wakeups.
2387 * The ring buffer can be used anywhere in the kernel and can not
2388 * blindly call wake_up. The layer that uses the ring buffer must be
2389 * responsible for that.
2390 *
2391 * Returns:
2392 * 1 if data has been transferred
2393 * 0 if no data has been transferred.
2394 */
2395int ring_buffer_read_page(struct ring_buffer *buffer,
2396 void **data_page, int cpu, int full)
2397{
2398 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2399 struct ring_buffer_event *event;
Steven Rostedt044fa782008-12-02 23:50:03 -05002400 struct buffer_data_page *bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002401 unsigned long flags;
2402 int ret = 0;
2403
2404 if (!data_page)
2405 return 0;
2406
Steven Rostedt044fa782008-12-02 23:50:03 -05002407 bpage = *data_page;
2408 if (!bpage)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002409 return 0;
2410
2411 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2412
2413 /*
2414 * rb_buffer_peek will get the next ring buffer if
2415 * the current reader page is empty.
2416 */
2417 event = rb_buffer_peek(buffer, cpu, NULL);
2418 if (!event)
2419 goto out;
2420
2421 /* check for data */
2422 if (!local_read(&cpu_buffer->reader_page->page->commit))
2423 goto out;
2424 /*
2425 * If the writer is already off of the read page, then simply
2426 * switch the read page with the given page. Otherwise
2427 * we need to copy the data from the reader to the writer.
2428 */
2429 if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2430 unsigned int read = cpu_buffer->reader_page->read;
2431
2432 if (full)
2433 goto out;
2434 /* The writer is still on the reader page, we must copy */
Steven Rostedt044fa782008-12-02 23:50:03 -05002435 bpage = cpu_buffer->reader_page->page;
2436 memcpy(bpage->data,
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002437 cpu_buffer->reader_page->page->data + read,
Steven Rostedt044fa782008-12-02 23:50:03 -05002438 local_read(&bpage->commit) - read);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002439
2440 /* consume what was read */
2441 cpu_buffer->reader_page->read += read;
2442
2443 } else {
2444 /* swap the pages */
Steven Rostedt044fa782008-12-02 23:50:03 -05002445 rb_init_page(bpage);
2446 bpage = cpu_buffer->reader_page->page;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002447 cpu_buffer->reader_page->page = *data_page;
2448 cpu_buffer->reader_page->read = 0;
Steven Rostedt044fa782008-12-02 23:50:03 -05002449 *data_page = bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002450 }
2451 ret = 1;
2452
2453 /* update the entry counter */
Steven Rostedt044fa782008-12-02 23:50:03 -05002454 rb_remove_entries(cpu_buffer, bpage);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002455 out:
2456 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2457
2458 return ret;
2459}
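/*
 * Illustrative sketch (not part of this file): pulling whole pages out of
 * the buffer with the three helpers above.  "buffer", "cpu" and
 * consume_page() are assumptions for the example:
 *
 *	void *page;
 *	int ret;
 *
 *	page = ring_buffer_alloc_read_page(buffer);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &page, cpu, 0);
 *	if (ret)
 *		consume_page(page);
 *	ring_buffer_free_read_page(buffer, page);
 */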
2460
Steven Rostedta3583242008-11-11 15:01:42 -05002461static ssize_t
2462rb_simple_read(struct file *filp, char __user *ubuf,
2463 size_t cnt, loff_t *ppos)
2464{
Steven Rostedt033601a2008-11-21 12:41:55 -05002465 long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05002466 char buf[64];
2467 int r;
2468
Steven Rostedt033601a2008-11-21 12:41:55 -05002469 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2470 r = sprintf(buf, "permanently disabled\n");
2471 else
2472 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
Steven Rostedta3583242008-11-11 15:01:42 -05002473
2474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2475}
2476
2477static ssize_t
2478rb_simple_write(struct file *filp, const char __user *ubuf,
2479 size_t cnt, loff_t *ppos)
2480{
Steven Rostedt033601a2008-11-21 12:41:55 -05002481 long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05002482 char buf[64];
2483 long val;
2484 int ret;
2485
2486 if (cnt >= sizeof(buf))
2487 return -EINVAL;
2488
2489 if (copy_from_user(&buf, ubuf, cnt))
2490 return -EFAULT;
2491
2492 buf[cnt] = 0;
2493
2494 ret = strict_strtoul(buf, 10, &val);
2495 if (ret < 0)
2496 return ret;
2497
Steven Rostedt033601a2008-11-21 12:41:55 -05002498 if (val)
2499 set_bit(RB_BUFFERS_ON_BIT, p);
2500 else
2501 clear_bit(RB_BUFFERS_ON_BIT, p);
Steven Rostedta3583242008-11-11 15:01:42 -05002502
2503 (*ppos)++;
2504
2505 return cnt;
2506}
2507
2508static struct file_operations rb_simple_fops = {
2509 .open = tracing_open_generic,
2510 .read = rb_simple_read,
2511 .write = rb_simple_write,
2512};
2513
2514
2515static __init int rb_init_debugfs(void)
2516{
2517 struct dentry *d_tracer;
2518 struct dentry *entry;
2519
2520 d_tracer = tracing_init_dentry();
2521
2522 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
Steven Rostedt033601a2008-11-21 12:41:55 -05002523 &ring_buffer_flags, &rb_simple_fops);
Steven Rostedta3583242008-11-11 15:01:42 -05002524 if (!entry)
2525 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2526
2527 return 0;
2528}
2529
2530fs_initcall(rb_init_debugfs);