/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - return the state of the global ring buffer switch
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
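
/*
 * Example usage of the switch above (an illustrative sketch only; the
 * caller and do_quiet_work() are hypothetical, while the tracing_*
 * calls are the functions defined in this file):
 *
 *	if (tracing_is_on()) {
 *		tracing_off();
 *		do_quiet_work();
 *		tracing_on();
 *	}
 */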

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len * RB_ALIGNMENT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type != RINGBUF_TYPE_DATA)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
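
/*
 * Worked example of the length encoding (values are illustrative): a
 * 12-byte payload fits the "small data" case, so it is stored as
 * event->len = 12 / RB_ALIGNMENT = 3 and rb_event_length() returns
 * 3 * 4 + RB_EVNT_HDR_SIZE. A 100-byte payload is too big for the len
 * field (RB_MAX_SMALL_DATA bytes), so event->len is 0 and the length
 * is kept in event->array[0] instead; the subtraction of
 * sizeof(event->array[0]) above undoes the extra word that this
 * encoding consumes.
 */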

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
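
/*
 * Example (illustrative, assuming a nanosecond-resolution clock): with
 * TS_SHIFT = 27, two events less than about 134 ms apart
 * ((1 << 27) ns) can store their delta directly in the event header.
 * A larger gap makes test_time_stamp() return 1, and the writer must
 * emit a TIME_EXTEND event that carries the delta split as
 * delta & TS_MASK and delta >> TS_SHIFT.
 */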

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})
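
/*
 * Example (taken from rb_check_pages() below): RB_WARN_ON() behaves
 * like WARN_ON(), but also disables recording so a corrupted buffer is
 * not made worse, and it evaluates to the condition so it can be used
 * directly in a test:
 *
 *	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 *		return -1;
 */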

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = buffer->clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit rb_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();


	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);
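
/*
 * Example usage (an illustrative sketch; the caller and its error
 * handling are hypothetical):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */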

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}
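
/*
 * Example (illustrative sketch; my_trace_clock is a hypothetical
 * callback): a tracer can install its own time source before events
 * are written. The default installed by ring_buffer_alloc() is
 * trace_clock_local().
 *
 *	static u64 my_trace_clock(void)
 *	{
 *		return trace_clock_local();
 *	}
 *
 *	ring_buffer_set_clock(buffer, my_trace_clock);
 */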

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);

}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
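
/*
 * Example (illustrative): resize to roughly 64 KB per CPU. The value
 * is rounded up to whole pages, and on success the rounded size is
 * returned:
 *
 *	ret = ring_buffer_resize(buffer, 64 * 1024);
 *	if (ret < 0)
 *		...	(resize failed; the old size remains)
 */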

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			return;
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}
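
/*
 * Worked example for the data case above (values are illustrative):
 * rb_update_event(event, RINGBUF_TYPE_DATA, 16) strips the header,
 * leaving 12 bytes, and stores DIV_ROUND_UP(12, RB_ALIGNMENT) = 3 in
 * event->len. With a total length of 108, the 104 remaining bytes
 * exceed RB_MAX_SMALL_DATA, so event->len becomes 0 and 104 is kept
 * in event->array[0] instead.
 */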

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
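
/*
 * Worked example (illustrative): rb_calculate_event_length(10) adds
 * the event header to get 10 + RB_EVNT_HDR_SIZE, then rounds up to a
 * multiple of RB_ALIGNMENT. rb_calculate_event_length(100) first adds
 * sizeof(event.array[0]) because 100 > RB_MAX_SMALL_DATA, reserving
 * room for the length word that rb_update_event() will store in
 * event->array[0].
 */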

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;
	bool lock_taken = false;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		/*
		 * Since the write to the buffer is still not
		 * fully lockless, we must be careful with NMIs.
		 * The locks in the writers are taken when a write
		 * crosses to a new page. The locks protect against
		 * races with the readers (this will soon be fixed
		 * with a lockless solution).
		 *
		 * Because we can not protect against NMIs, and we
		 * want to keep traces reentrant, we need to manage
		 * what happens when we are in an NMI.
		 *
		 * NMIs can happen after we take the lock.
		 * If we are in an NMI, only take the lock
		 * if it is not already taken. Otherwise
		 * simply fail.
		 */
		if (unlikely(in_nmi())) {
			if (!__raw_spin_trylock(&cpu_buffer->lock))
				goto out_reset;
		} else
			__raw_spin_lock(&cpu_buffer->lock);

		lock_taken = true;

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
			goto out_reset;

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == commit_page)) {
			WARN_ON_ONCE(1);
			goto out_reset;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
			cpu_buffer->tail_page->page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;

 out_reset:
	/* reset write */
	if (tail <= BUF_PAGE_SIZE)
		local_set(&tail_page->write, tail);

	if (likely(lock_taken))
		__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}
1332
1333static struct ring_buffer_event *
1334rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1335 unsigned type, unsigned long length)
1336{
1337 struct ring_buffer_event *event;
1338 u64 ts, delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001339 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001340 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001341
Steven Rostedtbf41a152008-10-04 02:00:59 -04001342 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001343 /*
1344 * We allow for interrupts to reenter here and do a trace.
1345 * If one does, it will cause this original code to loop
1346 * back here. Even with heavy interrupts happening, this
1347 * should only happen a few times in a row. If this happens
1348 * 1000 times in a row, there must be either an interrupt
1349 * storm or we have something buggy.
1350 * Bail!
1351 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001352 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001353 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001354
Steven Rostedt37886f62009-03-17 17:22:06 -04001355 ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001356
Steven Rostedtbf41a152008-10-04 02:00:59 -04001357 /*
1358 * Only the first commit can update the timestamp.
1359 * Yes there is a race here. If an interrupt comes in
1360 * just after the conditional and it traces too, then it
1361 * will also check the deltas. More than one timestamp may
1362 * also be made. But only the entry that did the actual
1363 * commit will be something other than zero.
1364 */
1365 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1366 rb_page_write(cpu_buffer->tail_page) ==
1367 rb_commit_index(cpu_buffer)) {
1368
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001369 delta = ts - cpu_buffer->write_stamp;
1370
Steven Rostedtbf41a152008-10-04 02:00:59 -04001371 /* make sure this delta is calculated here */
1372 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001373
Steven Rostedtbf41a152008-10-04 02:00:59 -04001374 /* Did the write stamp get updated already? */
1375 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt4143c5c2008-11-10 21:46:01 -05001376 delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001377
1378 if (test_time_stamp(delta)) {
1379
1380 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1381
1382 if (commit == -EBUSY)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001383 return NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001384
1385 if (commit == -EAGAIN)
1386 goto again;
1387
1388 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001389 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001390 } else
1391	/* Non-commits have zero deltas */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001392 delta = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001393
1394 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001395 if (PTR_ERR(event) == -EAGAIN)
1396 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001397
Steven Rostedtbf41a152008-10-04 02:00:59 -04001398 if (!event) {
1399 if (unlikely(commit))
1400 /*
1401			 * Ouch! We needed a timestamp and it was committed. But
1402 * we didn't get our event reserved.
1403 */
1404 rb_set_commit_to_write(cpu_buffer);
1405 return NULL;
1406 }
1407
1408 /*
1409	 * If the timestamp was committed, make the commit our entry
1410 * now so that we will update it when needed.
1411 */
1412 if (commit)
1413 rb_set_commit_event(cpu_buffer, event);
1414 else if (!rb_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001415 delta = 0;
1416
1417 event->time_delta = delta;
1418
1419 return event;
1420}
1421
Steven Rostedtbf41a152008-10-04 02:00:59 -04001422static DEFINE_PER_CPU(int, rb_need_resched);
1423
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001424/**
1425 * ring_buffer_lock_reserve - reserve a part of the buffer
1426 * @buffer: the ring buffer to reserve from
1427 * @length: the length of the data to reserve (excluding event header)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001428 *
1429 * Returns a reserved event on the ring buffer to copy directly to.
1430 * The user of this interface will need to get the body to write into
1431 * and can use the ring_buffer_event_data() interface.
1432 *
1433 * The length is the length of the data needed, not the event length
1434 * which also includes the event header.
1435 *
1436 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1437 * If NULL is returned, then nothing has been allocated or locked.
1438 */
1439struct ring_buffer_event *
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001440ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001441{
1442 struct ring_buffer_per_cpu *cpu_buffer;
1443 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001444 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001445
Steven Rostedt033601a2008-11-21 12:41:55 -05001446 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001447 return NULL;
1448
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001449 if (atomic_read(&buffer->record_disabled))
1450 return NULL;
1451
Steven Rostedtbf41a152008-10-04 02:00:59 -04001452 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001453 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001454
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001455 cpu = raw_smp_processor_id();
1456
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301457 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001458 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001459
1460 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001461
1462 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001463 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001464
1465 length = rb_calculate_event_length(length);
1466 if (length > BUF_PAGE_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001467 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001468
1469 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1470 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001471 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001472
Steven Rostedtbf41a152008-10-04 02:00:59 -04001473 /*
1474 * Need to store resched state on this cpu.
1475 * Only the first needs to.
1476 */
1477
1478 if (preempt_count() == 1)
1479 per_cpu(rb_need_resched, cpu) = resched;
1480
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001481 return event;
1482
Steven Rostedtd7690412008-10-01 00:29:53 -04001483 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001484 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001485 return NULL;
1486}
Robert Richterc4f50182008-12-11 16:49:22 +01001487EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001488
1489static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1490 struct ring_buffer_event *event)
1491{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001492 cpu_buffer->entries++;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001493
1494 /* Only process further if we own the commit */
1495 if (!rb_is_commit(cpu_buffer, event))
1496 return;
1497
1498 cpu_buffer->write_stamp += event->time_delta;
1499
1500 rb_set_commit_to_write(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001501}
1502
1503/**
1504 * ring_buffer_unlock_commit - commit a reserved event
1505 * @buffer: The buffer to commit to
1506 * @event: The event pointer to commit.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001507 *
1508 * This commits the data to the ring buffer, and releases any locks held.
1509 *
1510 * Must be paired with ring_buffer_lock_reserve.
1511 */
1512int ring_buffer_unlock_commit(struct ring_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001513 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001514{
1515 struct ring_buffer_per_cpu *cpu_buffer;
1516 int cpu = raw_smp_processor_id();
1517
1518 cpu_buffer = buffer->buffers[cpu];
1519
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001520 rb_commit(cpu_buffer, event);
1521
Steven Rostedtbf41a152008-10-04 02:00:59 -04001522 /*
1523 * Only the last preempt count needs to restore preemption.
1524 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001525 if (preempt_count() == 1)
1526 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1527 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001528 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001529
1530 return 0;
1531}
Robert Richterc4f50182008-12-11 16:49:22 +01001532EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
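
/*
 * A minimal usage sketch of the reserve/commit pair. The my_record
 * struct, my_buffer pointer and my_trace() function are hypothetical;
 * only the ring buffer calls are real. Note that the length passed to
 * the reserve is the payload size, without the event header.
 *
 *	struct my_record {
 *		unsigned long ip;
 *		unsigned long parent_ip;
 *	};
 *
 *	static int my_trace(struct ring_buffer *my_buffer,
 *			    unsigned long ip, unsigned long parent_ip)
 *	{
 *		struct ring_buffer_event *event;
 *		struct my_record *rec;
 *
 *		event = ring_buffer_lock_reserve(my_buffer, sizeof(*rec));
 *		if (!event)
 *			return -EBUSY;
 *
 *		rec = ring_buffer_event_data(event);
 *		rec->ip = ip;
 *		rec->parent_ip = parent_ip;
 *
 *		return ring_buffer_unlock_commit(my_buffer, event);
 *	}
 */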
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001533
1534/**
1535 * ring_buffer_write - write data to the buffer without reserving
1536 * @buffer: The ring buffer to write to.
1537 * @length: The length of the data being written (excluding the event header)
1538 * @data: The data to write to the buffer.
1539 *
1540 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1541 * one function. If you already have the data to write to the buffer, it
1542 * may be easier to simply call this function.
1543 *
1544 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1545 * and not the length of the event which would hold the header.
1546 */
1547int ring_buffer_write(struct ring_buffer *buffer,
1548 unsigned long length,
1549 void *data)
1550{
1551 struct ring_buffer_per_cpu *cpu_buffer;
1552 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001553 unsigned long event_length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001554 void *body;
1555 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001556 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001557
Steven Rostedt033601a2008-11-21 12:41:55 -05001558 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001559 return -EBUSY;
1560
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001561 if (atomic_read(&buffer->record_disabled))
1562 return -EBUSY;
1563
Steven Rostedt182e9f52008-11-03 23:15:56 -05001564 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001565
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001566 cpu = raw_smp_processor_id();
1567
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301568 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001569 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001570
1571 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001572
1573 if (atomic_read(&cpu_buffer->record_disabled))
1574 goto out;
1575
1576 event_length = rb_calculate_event_length(length);
1577 event = rb_reserve_next_event(cpu_buffer,
1578 RINGBUF_TYPE_DATA, event_length);
1579 if (!event)
1580 goto out;
1581
1582 body = rb_event_data(event);
1583
1584 memcpy(body, data, length);
1585
1586 rb_commit(cpu_buffer, event);
1587
1588 ret = 0;
1589 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001590 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001591
1592 return ret;
1593}
Robert Richterc4f50182008-12-11 16:49:22 +01001594EXPORT_SYMBOL_GPL(ring_buffer_write);
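
/*
 * Sketch of the one-shot path, reusing the hypothetical my_record
 * from the sketch above: when the payload already exists somewhere,
 * ring_buffer_write() replaces the reserve/copy/commit sequence.
 *
 *	struct my_record rec = { .ip = ip, .parent_ip = parent_ip };
 *
 *	if (ring_buffer_write(my_buffer, sizeof(rec), &rec))
 *		return -EBUSY;
 */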
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001595
Andrew Morton34a148b2009-01-09 12:27:09 -08001596static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001597{
1598 struct buffer_page *reader = cpu_buffer->reader_page;
1599 struct buffer_page *head = cpu_buffer->head_page;
1600 struct buffer_page *commit = cpu_buffer->commit_page;
1601
1602 return reader->read == rb_page_commit(reader) &&
1603 (commit == reader ||
1604 (commit == head &&
1605 head->read == rb_page_commit(commit)));
1606}
1607
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001608/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001609 * ring_buffer_record_disable - stop all writes into the buffer
1610 * @buffer: The ring buffer to stop writes to.
1611 *
1612 * This prevents all writes to the buffer. Any attempt to write
1613 * to the buffer after this will fail and return NULL.
1614 *
1615 * The caller should call synchronize_sched() after this.
1616 */
1617void ring_buffer_record_disable(struct ring_buffer *buffer)
1618{
1619 atomic_inc(&buffer->record_disabled);
1620}
Robert Richterc4f50182008-12-11 16:49:22 +01001621EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001622
1623/**
1624 * ring_buffer_record_enable - enable writes to the buffer
1625 * @buffer: The ring buffer to enable writes
1626 *
1627 * Note, multiple disables will need the same number of enables
1628 * to truly enable the writing (much like preempt_disable).
1629 */
1630void ring_buffer_record_enable(struct ring_buffer *buffer)
1631{
1632 atomic_dec(&buffer->record_disabled);
1633}
Robert Richterc4f50182008-12-11 16:49:22 +01001634EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
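
/*
 * Because record_disabled is a nesting counter, every disable must be
 * balanced by exactly one enable before writes resume, e.g.:
 *
 *	ring_buffer_record_disable(buffer);	(count 1, writes blocked)
 *	ring_buffer_record_disable(buffer);	(count 2)
 *	ring_buffer_record_enable(buffer);	(count 1, still blocked)
 *	ring_buffer_record_enable(buffer);	(count 0, writes resume)
 *
 * As noted above, call synchronize_sched() after disabling if writers
 * may still be inside a reserve/commit pair.
 */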
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001635
1636/**
1637 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1638 * @buffer: The ring buffer to stop writes to.
1639 * @cpu: The CPU buffer to stop
1640 *
1641 * This prevents all writes to the buffer. Any attempt to write
1642 * to the buffer after this will fail and return NULL.
1643 *
1644 * The caller should call synchronize_sched() after this.
1645 */
1646void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1647{
1648 struct ring_buffer_per_cpu *cpu_buffer;
1649
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301650 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001651 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001652
1653 cpu_buffer = buffer->buffers[cpu];
1654 atomic_inc(&cpu_buffer->record_disabled);
1655}
Robert Richterc4f50182008-12-11 16:49:22 +01001656EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001657
1658/**
1659 * ring_buffer_record_enable_cpu - enable writes to the buffer
1660 * @buffer: The ring buffer to enable writes
1661 * @cpu: The CPU to enable.
1662 *
1663 * Note, multiple disables will need the same number of enables
1664 * to truly enable the writing (much like preempt_disable).
1665 */
1666void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1667{
1668 struct ring_buffer_per_cpu *cpu_buffer;
1669
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301670 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001671 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001672
1673 cpu_buffer = buffer->buffers[cpu];
1674 atomic_dec(&cpu_buffer->record_disabled);
1675}
Robert Richterc4f50182008-12-11 16:49:22 +01001676EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001677
1678/**
1679 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1680 * @buffer: The ring buffer
1681 * @cpu: The per CPU buffer to get the entries from.
1682 */
1683unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1684{
1685 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001686 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001687
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301688 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001689 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001690
1691 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001692 ret = cpu_buffer->entries;
Steven Rostedt554f7862009-03-11 22:00:13 -04001693
1694 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001695}
Robert Richterc4f50182008-12-11 16:49:22 +01001696EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001697
1698/**
1699 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1700 * @buffer: The ring buffer
1701 * @cpu: The per CPU buffer to get the number of overruns from
1702 */
1703unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1704{
1705 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001706 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001707
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301708 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001709 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001710
1711 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001712 ret = cpu_buffer->overrun;
Steven Rostedt554f7862009-03-11 22:00:13 -04001713
1714 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001715}
Robert Richterc4f50182008-12-11 16:49:22 +01001716EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001717
1718/**
1719 * ring_buffer_entries - get the number of entries in a buffer
1720 * @buffer: The ring buffer
1721 *
1722 * Returns the total number of entries in the ring buffer
1723 * (all CPU entries)
1724 */
1725unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1726{
1727 struct ring_buffer_per_cpu *cpu_buffer;
1728 unsigned long entries = 0;
1729 int cpu;
1730
1731 /* if you care about this being correct, lock the buffer */
1732 for_each_buffer_cpu(buffer, cpu) {
1733 cpu_buffer = buffer->buffers[cpu];
1734 entries += cpu_buffer->entries;
1735 }
1736
1737 return entries;
1738}
Robert Richterc4f50182008-12-11 16:49:22 +01001739EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001740
1741/**
1742 * ring_buffer_overruns - get the number of overruns in the buffer
1743 * @buffer: The ring buffer
1744 *
1745 * Returns the total number of overruns in the ring buffer
1746 * (all CPU entries)
1747 */
1748unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1749{
1750 struct ring_buffer_per_cpu *cpu_buffer;
1751 unsigned long overruns = 0;
1752 int cpu;
1753
1754 /* if you care about this being correct, lock the buffer */
1755 for_each_buffer_cpu(buffer, cpu) {
1756 cpu_buffer = buffer->buffers[cpu];
1757 overruns += cpu_buffer->overrun;
1758 }
1759
1760 return overruns;
1761}
Robert Richterc4f50182008-12-11 16:49:22 +01001762EXPORT_SYMBOL_GPL(ring_buffer_overruns);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001763
Steven Rostedt642edba2008-11-12 00:01:26 -05001764static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001765{
1766 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1767
Steven Rostedtd7690412008-10-01 00:29:53 -04001768 /* Iterator usage is expected to have record disabled */
1769 if (list_empty(&cpu_buffer->reader_page->list)) {
1770 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001771 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001772 } else {
1773 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001774 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001775 }
1776 if (iter->head)
1777 iter->read_stamp = cpu_buffer->read_stamp;
1778 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05001779 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05001780}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001781
Steven Rostedt642edba2008-11-12 00:01:26 -05001782/**
1783 * ring_buffer_iter_reset - reset an iterator
1784 * @iter: The iterator to reset
1785 *
1786 * Resets the iterator, so that it will start from the beginning
1787 * again.
1788 */
1789void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1790{
Steven Rostedt554f7862009-03-11 22:00:13 -04001791 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt642edba2008-11-12 00:01:26 -05001792 unsigned long flags;
1793
Steven Rostedt554f7862009-03-11 22:00:13 -04001794 if (!iter)
1795 return;
1796
1797 cpu_buffer = iter->cpu_buffer;
1798
Steven Rostedt642edba2008-11-12 00:01:26 -05001799 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1800 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001801 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001802}
Robert Richterc4f50182008-12-11 16:49:22 +01001803EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001804
1805/**
1806 * ring_buffer_iter_empty - check if an iterator has no more to read
1807 * @iter: The iterator to check
1808 */
1809int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1810{
1811 struct ring_buffer_per_cpu *cpu_buffer;
1812
1813 cpu_buffer = iter->cpu_buffer;
1814
Steven Rostedtbf41a152008-10-04 02:00:59 -04001815 return iter->head_page == cpu_buffer->commit_page &&
1816 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001817}
Robert Richterc4f50182008-12-11 16:49:22 +01001818EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001819
1820static void
1821rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1822 struct ring_buffer_event *event)
1823{
1824 u64 delta;
1825
1826 switch (event->type) {
1827 case RINGBUF_TYPE_PADDING:
1828 return;
1829
1830 case RINGBUF_TYPE_TIME_EXTEND:
1831 delta = event->array[0];
1832 delta <<= TS_SHIFT;
1833 delta += event->time_delta;
1834 cpu_buffer->read_stamp += delta;
1835 return;
1836
1837 case RINGBUF_TYPE_TIME_STAMP:
1838 /* FIXME: not implemented */
1839 return;
1840
1841 case RINGBUF_TYPE_DATA:
1842 cpu_buffer->read_stamp += event->time_delta;
1843 return;
1844
1845 default:
1846 BUG();
1847 }
1848 return;
1849}
1850
1851static void
1852rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1853 struct ring_buffer_event *event)
1854{
1855 u64 delta;
1856
1857 switch (event->type) {
1858 case RINGBUF_TYPE_PADDING:
1859 return;
1860
1861 case RINGBUF_TYPE_TIME_EXTEND:
1862 delta = event->array[0];
1863 delta <<= TS_SHIFT;
1864 delta += event->time_delta;
1865 iter->read_stamp += delta;
1866 return;
1867
1868 case RINGBUF_TYPE_TIME_STAMP:
1869 /* FIXME: not implemented */
1870 return;
1871
1872 case RINGBUF_TYPE_DATA:
1873 iter->read_stamp += event->time_delta;
1874 return;
1875
1876 default:
1877 BUG();
1878 }
1879 return;
1880}
1881
Steven Rostedtd7690412008-10-01 00:29:53 -04001882static struct buffer_page *
1883rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001884{
Steven Rostedtd7690412008-10-01 00:29:53 -04001885 struct buffer_page *reader = NULL;
1886 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001887 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001888
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001889 local_irq_save(flags);
1890 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04001891
1892 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001893 /*
1894 * This should normally only loop twice. But because the
1895 * start of the reader inserts an empty page, it causes
1896 * a case where we will loop three times. There should be no
1897 * reason to loop four times (that I know of).
1898 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001899 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001900 reader = NULL;
1901 goto out;
1902 }
1903
Steven Rostedtd7690412008-10-01 00:29:53 -04001904 reader = cpu_buffer->reader_page;
1905
1906 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001907 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04001908 goto out;
1909
1910 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001911 if (RB_WARN_ON(cpu_buffer,
1912 cpu_buffer->reader_page->read > rb_page_size(reader)))
1913 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04001914
1915 /* check if we caught up to the tail */
1916 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001917 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04001918 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001919
1920 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04001921 * Splice the empty reader page into the list around the head.
1922 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001923 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001924
Steven Rostedtd7690412008-10-01 00:29:53 -04001925 reader = cpu_buffer->head_page;
1926 cpu_buffer->reader_page->list.next = reader->list.next;
1927 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001928
1929 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001930 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04001931
1932 /* Make the reader page now replace the head */
1933 reader->list.prev->next = &cpu_buffer->reader_page->list;
1934 reader->list.next->prev = &cpu_buffer->reader_page->list;
1935
1936 /*
1937 * If the tail is on the reader, then we must set the head
1938 * to the inserted page, otherwise we set it one before.
1939 */
1940 cpu_buffer->head_page = cpu_buffer->reader_page;
1941
Steven Rostedtbf41a152008-10-04 02:00:59 -04001942 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04001943 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1944
1945 /* Finally update the reader page to the new head */
1946 cpu_buffer->reader_page = reader;
1947 rb_reset_reader_page(cpu_buffer);
1948
1949 goto again;
1950
1951 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001952 __raw_spin_unlock(&cpu_buffer->lock);
1953 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04001954
1955 return reader;
1956}
1957
1958static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1959{
1960 struct ring_buffer_event *event;
1961 struct buffer_page *reader;
1962 unsigned length;
1963
1964 reader = rb_get_reader_page(cpu_buffer);
1965
1966 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001967 if (RB_WARN_ON(cpu_buffer, !reader))
1968 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04001969
1970 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001971
1972 if (event->type == RINGBUF_TYPE_DATA)
1973 cpu_buffer->entries--;
1974
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001975 rb_update_read_stamp(cpu_buffer, event);
1976
Steven Rostedtd7690412008-10-01 00:29:53 -04001977 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001978 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001979}
1980
1981static void rb_advance_iter(struct ring_buffer_iter *iter)
1982{
1983 struct ring_buffer *buffer;
1984 struct ring_buffer_per_cpu *cpu_buffer;
1985 struct ring_buffer_event *event;
1986 unsigned length;
1987
1988 cpu_buffer = iter->cpu_buffer;
1989 buffer = cpu_buffer->buffer;
1990
1991 /*
1992 * Check if we are at the end of the buffer.
1993 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001994 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001995 if (RB_WARN_ON(buffer,
1996 iter->head_page == cpu_buffer->commit_page))
1997 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04001998 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001999 return;
2000 }
2001
2002 event = rb_iter_head_event(iter);
2003
2004 length = rb_event_length(event);
2005
2006 /*
2007 * This should not be called to advance the header if we are
2008 * at the tail of the buffer.
2009 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002010 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05002011 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002012 (iter->head + length > rb_commit_index(cpu_buffer))))
2013 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002014
2015 rb_update_iter_read_stamp(iter, event);
2016
2017 iter->head += length;
2018
2019 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002020 if ((iter->head >= rb_page_size(iter->head_page)) &&
2021 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002022 rb_advance_iter(iter);
2023}
2024
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002025static struct ring_buffer_event *
2026rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002027{
2028 struct ring_buffer_per_cpu *cpu_buffer;
2029 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04002030 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002031 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002032
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002033 cpu_buffer = buffer->buffers[cpu];
2034
2035 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002036 /*
2037 * We repeat when a timestamp is encountered. It is possible
2038 * to get multiple timestamps from an interrupt entering just
2039 * as one timestamp is about to be written. The max times
2040 * that this can happen is the number of nested interrupts we
2041 * can have. Nesting 10 deep of interrupts is clearly
2042 * an anomaly.
2043 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002044 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002045 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002046
Steven Rostedtd7690412008-10-01 00:29:53 -04002047 reader = rb_get_reader_page(cpu_buffer);
2048 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002049 return NULL;
2050
Steven Rostedtd7690412008-10-01 00:29:53 -04002051 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002052
2053 switch (event->type) {
2054 case RINGBUF_TYPE_PADDING:
Steven Rostedtbf41a152008-10-04 02:00:59 -04002055 RB_WARN_ON(cpu_buffer, 1);
Steven Rostedtd7690412008-10-01 00:29:53 -04002056 rb_advance_reader(cpu_buffer);
2057 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002058
2059 case RINGBUF_TYPE_TIME_EXTEND:
2060 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04002061 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002062 goto again;
2063
2064 case RINGBUF_TYPE_TIME_STAMP:
2065 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04002066 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002067 goto again;
2068
2069 case RINGBUF_TYPE_DATA:
2070 if (ts) {
2071 *ts = cpu_buffer->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002072 ring_buffer_normalize_time_stamp(buffer,
2073 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002074 }
2075 return event;
2076
2077 default:
2078 BUG();
2079 }
2080
2081 return NULL;
2082}
Robert Richterc4f50182008-12-11 16:49:22 +01002083EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002084
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002085static struct ring_buffer_event *
2086rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002087{
2088 struct ring_buffer *buffer;
2089 struct ring_buffer_per_cpu *cpu_buffer;
2090 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002091 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002092
2093 if (ring_buffer_iter_empty(iter))
2094 return NULL;
2095
2096 cpu_buffer = iter->cpu_buffer;
2097 buffer = cpu_buffer->buffer;
2098
2099 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002100 /*
2101 * We repeat when a timestamp is encountered. It is possible
2102 * to get multiple timestamps from an interrupt entering just
2103 * as one timestamp is about to be written. The max times
2104 * that this can happen is the number of nested interrupts we
2105 * can have. Nesting 10 deep of interrupts is clearly
2106 * an anomaly.
2107 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002108 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002109 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002110
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002111 if (rb_per_cpu_empty(cpu_buffer))
2112 return NULL;
2113
2114 event = rb_iter_head_event(iter);
2115
2116 switch (event->type) {
2117 case RINGBUF_TYPE_PADDING:
Steven Rostedtd7690412008-10-01 00:29:53 -04002118 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002119 goto again;
2120
2121 case RINGBUF_TYPE_TIME_EXTEND:
2122 /* Internal data, OK to advance */
2123 rb_advance_iter(iter);
2124 goto again;
2125
2126 case RINGBUF_TYPE_TIME_STAMP:
2127 /* FIXME: not implemented */
2128 rb_advance_iter(iter);
2129 goto again;
2130
2131 case RINGBUF_TYPE_DATA:
2132 if (ts) {
2133 *ts = iter->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002134 ring_buffer_normalize_time_stamp(buffer,
2135 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002136 }
2137 return event;
2138
2139 default:
2140 BUG();
2141 }
2142
2143 return NULL;
2144}
Robert Richterc4f50182008-12-11 16:49:22 +01002145EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002146
2147/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002148 * ring_buffer_peek - peek at the next event to be read
2149 * @buffer: The ring buffer to read
2150 * @cpu: The cpu to peek at
2151 * @ts: The timestamp counter of this event.
2152 *
2153 * This will return the event that will be read next, but does
2154 * not consume the data.
2155 */
2156struct ring_buffer_event *
2157ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2158{
2159 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8aabee52009-03-12 13:13:49 -04002160 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002161 unsigned long flags;
2162
Steven Rostedt554f7862009-03-11 22:00:13 -04002163 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002164 return NULL;
Steven Rostedt554f7862009-03-11 22:00:13 -04002165
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002166 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2167 event = rb_buffer_peek(buffer, cpu, ts);
2168 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2169
2170 return event;
2171}
2172
2173/**
2174 * ring_buffer_iter_peek - peek at the next event to be read
2175 * @iter: The ring buffer iterator
2176 * @ts: The timestamp counter of this event.
2177 *
2178 * This will return the event that will be read next, but does
2179 * not increment the iterator.
2180 */
2181struct ring_buffer_event *
2182ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2183{
2184 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2185 struct ring_buffer_event *event;
2186 unsigned long flags;
2187
2188 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2189 event = rb_iter_peek(iter, ts);
2190 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2191
2192 return event;
2193}
2194
2195/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002196 * ring_buffer_consume - return an event and consume it
2197 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to get the next event from
 * @ts: Where to store the timestamp of the consumed event (may be NULL)
2198 *
2199 * Returns the next event in the ring buffer, and that event is consumed.
2200 * Meaning, that sequential reads will keep returning a different event,
2201 * and eventually empty the ring buffer if the producer is slower.
2202 */
2203struct ring_buffer_event *
2204ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2205{
Steven Rostedt554f7862009-03-11 22:00:13 -04002206 struct ring_buffer_per_cpu *cpu_buffer;
2207 struct ring_buffer_event *event = NULL;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002208 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002209
Steven Rostedt554f7862009-03-11 22:00:13 -04002210 /* might be called in atomic */
2211 preempt_disable();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002212
Steven Rostedt554f7862009-03-11 22:00:13 -04002213 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2214 goto out;
2215
2216 cpu_buffer = buffer->buffers[cpu];
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002217 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002218
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002219 event = rb_buffer_peek(buffer, cpu, ts);
2220 if (!event)
Steven Rostedt554f7862009-03-11 22:00:13 -04002221 goto out_unlock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002222
Steven Rostedtd7690412008-10-01 00:29:53 -04002223 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002224
Steven Rostedt554f7862009-03-11 22:00:13 -04002225 out_unlock:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002226 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2227
Steven Rostedt554f7862009-03-11 22:00:13 -04002228 out:
2229 preempt_enable();
2230
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002231 return event;
2232}
Robert Richterc4f50182008-12-11 16:49:22 +01002233EXPORT_SYMBOL_GPL(ring_buffer_consume);
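
/*
 * A consuming-read sketch: drain everything currently readable on one
 * CPU. process_event() is hypothetical; the ring buffer calls are the
 * real API. The ts argument receives each event's normalized timestamp.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_event(ts, ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event));
 */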
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002234
2235/**
2236 * ring_buffer_read_start - start a non consuming read of the buffer
2237 * @buffer: The ring buffer to read from
2238 * @cpu: The cpu buffer to iterate over
2239 *
2240 * This starts up an iteration through the buffer. It also disables
2241 * the recording to the buffer until the reading is finished.
2242 * This prevents the reading from being corrupted. This is not
2243 * a consuming read, so a producer is not expected.
2244 *
2245 * Must be paired with ring_buffer_read_finish.
2246 */
2247struct ring_buffer_iter *
2248ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2249{
2250 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002251 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002252 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002253
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302254 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002255 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002256
2257 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2258 if (!iter)
Steven Rostedt8aabee52009-03-12 13:13:49 -04002259 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002260
2261 cpu_buffer = buffer->buffers[cpu];
2262
2263 iter->cpu_buffer = cpu_buffer;
2264
2265 atomic_inc(&cpu_buffer->record_disabled);
2266 synchronize_sched();
2267
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002268 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002269 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002270 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002271 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002272 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002273
2274 return iter;
2275}
Robert Richterc4f50182008-12-11 16:49:22 +01002276EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002277
2278/**
2279 * ring_buffer_read_finish - finish reading the iterator of the buffer
2280 * @iter: The iterator retrieved by ring_buffer_read_start
2281 *
2282 * This re-enables the recording to the buffer, and frees the
2283 * iterator.
2284 */
2285void
2286ring_buffer_read_finish(struct ring_buffer_iter *iter)
2287{
2288 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2289
2290 atomic_dec(&cpu_buffer->record_disabled);
2291 kfree(iter);
2292}
Robert Richterc4f50182008-12-11 16:49:22 +01002293EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002294
2295/**
2296 * ring_buffer_read - read the next item in the ring buffer by the iterator
2297 * @iter: The ring buffer iterator
2298 * @ts: The time stamp of the event read.
2299 *
2300 * This reads the next event in the ring buffer and increments the iterator.
2301 */
2302struct ring_buffer_event *
2303ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2304{
2305 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002306 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2307 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002308
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002309 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2310 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002311 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002312 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002313
2314 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002315 out:
2316 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002317
2318 return event;
2319}
Robert Richterc4f50182008-12-11 16:49:22 +01002320EXPORT_SYMBOL_GPL(ring_buffer_read);
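
/*
 * A non-consuming read sketch. Recording on this cpu buffer stays
 * disabled between read_start and read_finish, so a producer is not
 * expected; process_event() is hypothetical.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return -ENOMEM;
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(ts, ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event));
 *
 *	ring_buffer_read_finish(iter);
 */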
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002321
2322/**
2323 * ring_buffer_size - return the size of the ring buffer (in bytes)
2324 * @buffer: The ring buffer.
2325 */
2326unsigned long ring_buffer_size(struct ring_buffer *buffer)
2327{
2328 return BUF_PAGE_SIZE * buffer->pages;
2329}
Robert Richterc4f50182008-12-11 16:49:22 +01002330EXPORT_SYMBOL_GPL(ring_buffer_size);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002331
2332static void
2333rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2334{
2335 cpu_buffer->head_page
2336 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002337 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002338 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002339
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002340 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002341
2342 cpu_buffer->tail_page = cpu_buffer->head_page;
2343 cpu_buffer->commit_page = cpu_buffer->head_page;
2344
2345 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2346 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002347 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002348 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002349
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002350 cpu_buffer->overrun = 0;
2351 cpu_buffer->entries = 0;
Steven Rostedt69507c02009-01-21 18:45:57 -05002352
2353 cpu_buffer->write_stamp = 0;
2354 cpu_buffer->read_stamp = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002355}
2356
2357/**
2358 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2359 * @buffer: The ring buffer to reset a per cpu buffer of
2360 * @cpu: The CPU buffer to be reset
2361 */
2362void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2363{
2364 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2365 unsigned long flags;
2366
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302367 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002368 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002369
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002370 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2371
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002372 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002373
2374 rb_reset_cpu(cpu_buffer);
2375
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002376 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002377
2378 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002379}
Robert Richterc4f50182008-12-11 16:49:22 +01002380EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002381
2382/**
2383 * ring_buffer_reset - reset a ring buffer
2384 * @buffer: The ring buffer to reset all cpu buffers
2385 */
2386void ring_buffer_reset(struct ring_buffer *buffer)
2387{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002388 int cpu;
2389
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002390 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002391 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002392}
Robert Richterc4f50182008-12-11 16:49:22 +01002393EXPORT_SYMBOL_GPL(ring_buffer_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002394
2395/**
2396 * ring_buffer_empty - is the ring buffer empty?
2397 * @buffer: The ring buffer to test
2398 */
2399int ring_buffer_empty(struct ring_buffer *buffer)
2400{
2401 struct ring_buffer_per_cpu *cpu_buffer;
2402 int cpu;
2403
2404 /* yes this is racy, but if you don't like the race, lock the buffer */
2405 for_each_buffer_cpu(buffer, cpu) {
2406 cpu_buffer = buffer->buffers[cpu];
2407 if (!rb_per_cpu_empty(cpu_buffer))
2408 return 0;
2409 }
Steven Rostedt554f7862009-03-11 22:00:13 -04002410
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002411 return 1;
2412}
Robert Richterc4f50182008-12-11 16:49:22 +01002413EXPORT_SYMBOL_GPL(ring_buffer_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002414
2415/**
2416 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2417 * @buffer: The ring buffer
2418 * @cpu: The CPU buffer to test
2419 */
2420int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2421{
2422 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002423 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002424
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302425 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002426 return 1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002427
2428 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04002429 ret = rb_per_cpu_empty(cpu_buffer);
2430
Steven Rostedt554f7862009-03-11 22:00:13 -04002431
2432 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002433}
Robert Richterc4f50182008-12-11 16:49:22 +01002434EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002435
2436/**
2437 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2438 * @buffer_a: One buffer to swap with
2439 * @buffer_b: The other buffer to swap with
2440 *
2441 * This function is useful for tracers that want to take a "snapshot"
2442 * of a CPU buffer and has another back up buffer lying around.
2443 * of a CPU buffer and have another backup buffer lying around.
2444 * It is expected that the tracer handles the cpu buffer not being
2445 */
2446int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2447 struct ring_buffer *buffer_b, int cpu)
2448{
2449 struct ring_buffer_per_cpu *cpu_buffer_a;
2450 struct ring_buffer_per_cpu *cpu_buffer_b;
Steven Rostedt554f7862009-03-11 22:00:13 -04002451 int ret = -EINVAL;
2452
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302453 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2454 !cpumask_test_cpu(cpu, buffer_b->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04002455 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002456
2457 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08002458 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt554f7862009-03-11 22:00:13 -04002459 goto out;
2460
2461 ret = -EAGAIN;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002462
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002463 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedt554f7862009-03-11 22:00:13 -04002464 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002465
2466 if (atomic_read(&buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002467 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002468
2469 if (atomic_read(&buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002470 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002471
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002472 cpu_buffer_a = buffer_a->buffers[cpu];
2473 cpu_buffer_b = buffer_b->buffers[cpu];
2474
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002475 if (atomic_read(&cpu_buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002476 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002477
2478 if (atomic_read(&cpu_buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002479 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002480
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002481 /*
2482 * We can't do a synchronize_sched here because this
2483 * function can be called in atomic context.
2484 * Normally this will be called from the same CPU as cpu.
2485 * If not it's up to the caller to protect this.
2486 */
2487 atomic_inc(&cpu_buffer_a->record_disabled);
2488 atomic_inc(&cpu_buffer_b->record_disabled);
2489
2490 buffer_a->buffers[cpu] = cpu_buffer_b;
2491 buffer_b->buffers[cpu] = cpu_buffer_a;
2492
2493 cpu_buffer_b->buffer = buffer_a;
2494 cpu_buffer_a->buffer = buffer_b;
2495
2496 atomic_dec(&cpu_buffer_a->record_disabled);
2497 atomic_dec(&cpu_buffer_b->record_disabled);
2498
Steven Rostedt554f7862009-03-11 22:00:13 -04002499 ret = 0;
2500out:
Steven Rostedt554f7862009-03-11 22:00:13 -04002501 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002502}
Robert Richterc4f50182008-12-11 16:49:22 +01002503EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
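
/*
 * Snapshot sketch: swap the live per cpu buffer with a spare buffer
 * and read the frozen copy at leisure. Both buffers must have been
 * allocated with the same number of pages; live_buffer, snap_buffer
 * and read_snapshot() are hypothetical names.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu) == 0)
 *		read_snapshot(snap_buffer, cpu);
 *
 * On success the events captured on that cpu are now reachable through
 * snap_buffer, while live_buffer keeps recording into the old spare.
 */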
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002504
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002505static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
Lai Jiangshan667d2412009-02-09 14:21:17 +08002506 struct buffer_data_page *bpage,
2507 unsigned int offset)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002508{
2509 struct ring_buffer_event *event;
2510 unsigned long head;
2511
2512 __raw_spin_lock(&cpu_buffer->lock);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002513 for (head = offset; head < local_read(&bpage->commit);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002514 head += rb_event_length(event)) {
2515
Steven Rostedt044fa782008-12-02 23:50:03 -05002516 event = __rb_data_page_index(bpage, head);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002517 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2518 return;
2519 /* Only count data entries */
2520 if (event->type != RINGBUF_TYPE_DATA)
2521 continue;
2522 cpu_buffer->entries--;
2523 }
2524 __raw_spin_unlock(&cpu_buffer->lock);
2525}
2526
2527/**
2528 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2529 * @buffer: the buffer to allocate for.
2530 *
2531 * This function is used in conjunction with ring_buffer_read_page.
2532 * When reading a full page from the ring buffer, these functions
2533 * can be used to speed up the process. The calling function should
2534 * allocate a few pages first with this function. Then when it
2535 * needs to get pages from the ring buffer, it passes the result
2536 * of this function into ring_buffer_read_page, which will swap
2537 * the page that was allocated, with the read page of the buffer.
2538 *
2539 * Returns:
2540 * The page allocated, or NULL on error.
2541 */
2542void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2543{
Steven Rostedt044fa782008-12-02 23:50:03 -05002544 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002545 unsigned long addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002546
2547 addr = __get_free_page(GFP_KERNEL);
2548 if (!addr)
2549 return NULL;
2550
Steven Rostedt044fa782008-12-02 23:50:03 -05002551 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002552
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002553 rb_init_page(bpage);
2554
Steven Rostedt044fa782008-12-02 23:50:03 -05002555 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002556}
2557
2558/**
2559 * ring_buffer_free_read_page - free an allocated read page
2560 * @buffer: the buffer the page was allocate for
2561 * @buffer: the buffer the page was allocated for
2562 *
2563 * Free a page allocated from ring_buffer_alloc_read_page.
2564 */
2565void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2566{
2567 free_page((unsigned long)data);
2568}
2569
2570/**
2571 * ring_buffer_read_page - extract a page from the ring buffer
2572 * @buffer: buffer to extract from
2573 * @data_page: the page to use, allocated from ring_buffer_alloc_read_page
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002574 * @len: amount to extract
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002575 * @cpu: the cpu of the buffer to extract
2576 * @full: should the extraction only happen when the page is full.
2577 *
2578 * This function will pull out a page from the ring buffer and consume it.
2579 * @data_page must be the address of the variable that was returned
2580 * from ring_buffer_alloc_read_page. This is because the page might be used
2581 * to swap with a page in the ring buffer.
2582 *
2583 * for example:
Lai Jiangshanb85fa012009-02-09 14:21:14 +08002584 * rpage = ring_buffer_alloc_read_page(buffer);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002585 * if (!rpage)
2586 * return error;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002587 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002588 * if (ret >= 0)
2589 * process_page(rpage, ret);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002590 *
2591 * When @full is set, the function will not return data unless
2592 * the writer is off the reader page.
2593 *
2594 * Note: it is up to the calling functions to handle sleeps and wakeups.
2595 * The ring buffer can be used anywhere in the kernel and can not
2596 * blindly call wake_up. The layer that uses the ring buffer must be
2597 * responsible for that.
2598 *
2599 * Returns:
Lai Jiangshan667d2412009-02-09 14:21:17 +08002600 * >=0 if data has been transferred, returns the offset of consumed data.
2601 * <0 if no data has been transferred.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002602 */
2603int ring_buffer_read_page(struct ring_buffer *buffer,
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002604 void **data_page, size_t len, int cpu, int full)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002605{
2606 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2607 struct ring_buffer_event *event;
Steven Rostedt044fa782008-12-02 23:50:03 -05002608 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002609 struct buffer_page *reader;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002610 unsigned long flags;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002611 unsigned int commit;
Lai Jiangshan667d2412009-02-09 14:21:17 +08002612 unsigned int read;
Steven Rostedt4f3640f2009-03-03 23:52:42 -05002613 u64 save_timestamp;
Lai Jiangshan667d2412009-02-09 14:21:17 +08002614 int ret = -1;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002615
Steven Rostedt554f7862009-03-11 22:00:13 -04002616 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2617 goto out;
2618
Steven Rostedt474d32b2009-03-03 19:51:40 -05002619 /*
2620 * If len is not big enough to hold the page header, then
2621 * we can not copy anything.
2622 */
2623 if (len <= BUF_PAGE_HDR_SIZE)
Steven Rostedt554f7862009-03-11 22:00:13 -04002624 goto out;
Steven Rostedt474d32b2009-03-03 19:51:40 -05002625
2626 len -= BUF_PAGE_HDR_SIZE;
2627
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002628 if (!data_page)
Steven Rostedt554f7862009-03-11 22:00:13 -04002629 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002630
Steven Rostedt044fa782008-12-02 23:50:03 -05002631 bpage = *data_page;
2632 if (!bpage)
Steven Rostedt554f7862009-03-11 22:00:13 -04002633 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002634
2635 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2636
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002637 reader = rb_get_reader_page(cpu_buffer);
2638 if (!reader)
Steven Rostedt554f7862009-03-11 22:00:13 -04002639 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002640
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002641 event = rb_reader_event(cpu_buffer);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002642
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002643 read = reader->read;
2644 commit = rb_page_commit(reader);
2645
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002646 /*
Steven Rostedt474d32b2009-03-03 19:51:40 -05002647 * If this page has been partially read or
2648 * if len is not big enough to read the rest of the page or
2649 * a writer is still on the page, then
2650 * we must copy the data from the page to the buffer.
2651 * Otherwise, we can simply swap the page with the one passed in.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002652 */
Steven Rostedt474d32b2009-03-03 19:51:40 -05002653 if (read || (len < (commit - read)) ||
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002654 cpu_buffer->reader_page == cpu_buffer->commit_page) {
Lai Jiangshan667d2412009-02-09 14:21:17 +08002655 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
Steven Rostedt474d32b2009-03-03 19:51:40 -05002656 unsigned int rpos = read;
2657 unsigned int pos = 0;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002658 unsigned int size;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002659
2660 if (full)
Steven Rostedt554f7862009-03-11 22:00:13 -04002661 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002662
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002663 if (len > (commit - read))
2664 len = (commit - read);
2665
2666 size = rb_event_length(event);
2667
2668 if (len < size)
Steven Rostedt554f7862009-03-11 22:00:13 -04002669 goto out_unlock;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002670
Steven Rostedt4f3640f2009-03-03 23:52:42 -05002671 /* save the current timestamp, since the user will need it */
2672 save_timestamp = cpu_buffer->read_stamp;
2673
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002674 /* Need to copy one event at a time */
2675 do {
Steven Rostedt474d32b2009-03-03 19:51:40 -05002676 memcpy(bpage->data + pos, rpage->data + rpos, size);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002677
2678 len -= size;
2679
2680 rb_advance_reader(cpu_buffer);
Steven Rostedt474d32b2009-03-03 19:51:40 -05002681 rpos = reader->read;
2682 pos += size;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002683
2684 event = rb_reader_event(cpu_buffer);
2685 size = rb_event_length(event);
2686 } while (len > size);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002687
2688 /* update bpage */
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002689 local_set(&bpage->commit, pos);
Steven Rostedt4f3640f2009-03-03 23:52:42 -05002690 bpage->time_stamp = save_timestamp;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002691
Steven Rostedt474d32b2009-03-03 19:51:40 -05002692 /* we copied everything to the beginning */
2693 read = 0;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002694 } else {
2695 /* swap the pages */
Steven Rostedt044fa782008-12-02 23:50:03 -05002696 rb_init_page(bpage);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002697 bpage = reader->page;
2698 reader->page = *data_page;
2699 local_set(&reader->write, 0);
2700 reader->read = 0;
Steven Rostedt044fa782008-12-02 23:50:03 -05002701 *data_page = bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002702
2703 /* update the entry counter */
2704 rb_remove_entries(cpu_buffer, bpage, read);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002705 }
Lai Jiangshan667d2412009-02-09 14:21:17 +08002706 ret = read;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002707
Steven Rostedt554f7862009-03-11 22:00:13 -04002708 out_unlock:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002709 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2710
Steven Rostedt554f7862009-03-11 22:00:13 -04002711 out:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002712 return ret;
2713}
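/*
 * Editor's illustrative sketch (not part of the original source): one
 * plausible way a kernel-side consumer might drive the page-read API
 * above. The process_page() callback is hypothetical; the three
 * ring_buffer_*_read_page() calls are the ones defined in this file.
 */
static void example_consume_page(struct ring_buffer *buffer, int cpu)
{
	void *page = ring_buffer_alloc_read_page(buffer);
	int ret;

	if (!page)
		return;

	/* ask for a full page; len must exceed BUF_PAGE_HDR_SIZE */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/*
		 * ret is the offset of the first unread event in the
		 * returned page, whether the page was swapped out whole
		 * or events were copied into it one at a time.
		 */
		/* process_page(page, ret); -- hypothetical consumer */
	}

	ring_buffer_free_read_page(buffer, page);
}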
2714
Steven Rostedta3583242008-11-11 15:01:42 -05002715static ssize_t
2716rb_simple_read(struct file *filp, char __user *ubuf,
2717 size_t cnt, loff_t *ppos)
2718{
Hannes Eder5e398412009-02-10 19:44:34 +01002719 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05002720 char buf[64];
2721 int r;
2722
Steven Rostedt033601a2008-11-21 12:41:55 -05002723 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2724 r = sprintf(buf, "permanently disabled\n");
2725 else
2726 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
Steven Rostedta3583242008-11-11 15:01:42 -05002727
2728 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2729}
2730
2731static ssize_t
2732rb_simple_write(struct file *filp, const char __user *ubuf,
2733 size_t cnt, loff_t *ppos)
2734{
Hannes Eder5e398412009-02-10 19:44:34 +01002735 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05002736 char buf[64];
Hannes Eder5e398412009-02-10 19:44:34 +01002737 unsigned long val;
Steven Rostedta3583242008-11-11 15:01:42 -05002738 int ret;
2739
2740 if (cnt >= sizeof(buf))
2741 return -EINVAL;
2742
2743 if (copy_from_user(&buf, ubuf, cnt))
2744 return -EFAULT;
2745
2746 buf[cnt] = 0;
2747
2748 ret = strict_strtoul(buf, 10, &val);
2749 if (ret < 0)
2750 return ret;
2751
Steven Rostedt033601a2008-11-21 12:41:55 -05002752 if (val)
2753 set_bit(RB_BUFFERS_ON_BIT, p);
2754 else
2755 clear_bit(RB_BUFFERS_ON_BIT, p);
Steven Rostedta3583242008-11-11 15:01:42 -05002756
2757 (*ppos)++;
2758
2759 return cnt;
2760}
2761
Steven Rostedt5e2336a02009-03-05 21:44:55 -05002762static const struct file_operations rb_simple_fops = {
Steven Rostedta3583242008-11-11 15:01:42 -05002763 .open = tracing_open_generic,
2764 .read = rb_simple_read,
2765 .write = rb_simple_write,
2766};
2767
2768
2769static __init int rb_init_debugfs(void)
2770{
2771 struct dentry *d_tracer;
2772 struct dentry *entry;
2773
2774 d_tracer = tracing_init_dentry();
2775
2776 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
Steven Rostedt033601a2008-11-21 12:41:55 -05002777 &ring_buffer_flags, &rb_simple_fops);
Steven Rostedta3583242008-11-11 15:01:42 -05002778 if (!entry)
2779 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2780
2781 return 0;
2782}
2783
2784fs_initcall(rb_init_debugfs);
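/*
 * Editor's note (illustrative, not part of the original source): the
 * "tracing_on" file created above lets user space flip the global
 * RB_BUFFERS_ON_BIT. Reading it back returns "1" or "0", or
 * "permanently disabled" once tracing_off_permanent() has been called.
 * A minimal user-space sketch, assuming debugfs is mounted at
 * /sys/kernel/debug and tracing_init_dentry() placed the file under
 * the "tracing" directory:
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <unistd.h>

static void tracing_on(int on)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd >= 0) {
		write(fd, on ? "1" : "0", 1);
		close(fd);
	}
}
#endif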
Steven Rostedt554f7862009-03-11 22:00:13 -04002785
Steven Rostedt59222ef2009-03-12 11:46:03 -04002786#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -04002787static int __cpuinit rb_cpu_notify(struct notifier_block *self,
2788 unsigned long action, void *hcpu)
2789{
2790 struct ring_buffer *buffer =
2791 container_of(self, struct ring_buffer, cpu_notify);
2792 long cpu = (long)hcpu;
2793
2794 switch (action) {
2795 case CPU_UP_PREPARE:
2796 case CPU_UP_PREPARE_FROZEN:
2797		if (cpumask_test_cpu(cpu, buffer->cpumask))
2798 return NOTIFY_OK;
2799
2800 buffer->buffers[cpu] =
2801 rb_allocate_cpu_buffer(buffer, cpu);
2802 if (!buffer->buffers[cpu]) {
2803 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
2804 cpu);
2805 return NOTIFY_OK;
2806 }
2807 smp_wmb();
2808		cpumask_set_cpu(cpu, buffer->cpumask);
2809 break;
2810 case CPU_DOWN_PREPARE:
2811 case CPU_DOWN_PREPARE_FROZEN:
2812 /*
2813 * Do nothing.
2814 * If we were to free the buffer, then the user would
2815 * lose any trace that was in the buffer.
2816 */
2817 break;
2818 default:
2819 break;
2820 }
2821 return NOTIFY_OK;
2822}
2823#endif
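/*
 * Editor's illustrative sketch (not part of the original source): the
 * notifier above only takes effect once it is hooked up. A minimal
 * registration, of the kind ring_buffer_alloc() would perform when
 * CONFIG_HOTPLUG_CPU is enabled, might look like this:
 */
#if 0	/* illustrative only */
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif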