/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
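
/*
 * Illustrative sketch (not part of the original file): decoding the
 * compressed header printed above by hand.  This assumes the GCC
 * little-endian bitfield layout, where type_len occupies the low 5
 * bits of the event's first 32-bit word and time_delta the upper 27.
 */
static __maybe_unused void
rb_example_decode_header(u32 word, u32 *type_len, u32 *time_delta)
{
	*type_len   = word & 0x1f;	/* 5-bit type/length field */
	*time_delta = word >> 5;	/* 27-bit time delta */
}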

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page       |               |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
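
/*
 * Illustrative sketch (not part of the original file): a consuming
 * reader draining one CPU's buffer through the public API declared
 * in linux/ring_buffer.h.  Each call may trigger the reader-page
 * swap described above once the current reader page is exhausted.
 */
static __maybe_unused void rb_example_drain_cpu(struct ring_buffer *buffer,
						int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		void *data = ring_buffer_event_data(event);

		(void)data;	/* a real reader would parse the payload */
	}
}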

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};
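
/*
 * Illustrative sketch (not in the original source): how the two bits
 * combine.  The DISABLED bit always wins; ON only matters while the
 * buffers have not been permanently disabled.
 */
static __maybe_unused int rb_example_flags_allow(unsigned long flags)
{
	if (flags & RB_BUFFERS_DISABLED)	/* X 1: permanently off */
		return 0;
	return !!(flags & RB_BUFFERS_ON);	/* 1 0: on, 0 0: off */
}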

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show the state of the global tracing switch
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
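
/*
 * Illustrative usage (not in the original source): bracketing a
 * suspect code path with the global switch so only events from that
 * window are recorded, assuming the buffers were not permanently
 * disabled beforehand.
 */
static __maybe_unused void rb_example_trace_window(void (*fn)(void))
{
	tracing_on();
	fn();			/* only this window is recorded */
	tracing_off();
}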

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING
			&& event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
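
/*
 * Illustrative sketch (not in the original source): copying an
 * event's payload out of the buffer with the two accessors above.
 * The length accessor already strips the event header, so it is the
 * usable payload size.
 */
static __maybe_unused void
rb_example_copy_event(struct ring_buffer_event *event, void *dst)
{
	unsigned len = ring_buffer_event_length(event);

	memcpy(dst, ring_buffer_event_data(event), len);
}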

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
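
/*
 * Illustrative numbers (not in the original source): with a 27-bit
 * delta field, any gap of 2^27 or more clock units between events
 * forces a TIME_EXTEND event; smaller deltas ride along in the
 * event header itself.
 */
static __maybe_unused void rb_example_delta_limits(void)
{
	WARN_ON(test_time_stamp((1ULL << TS_SHIFT) - 1));	/* fits */
	WARN_ON(!test_time_stamp(1ULL << TS_SHIFT));		/* too big */
}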

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\n",
			       (unsigned int)sizeof(field.time_stamp));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE);

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock;	/* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			nmi_dropped;
	unsigned long			commit_overrun;
	unsigned long			overrun;
	unsigned long			read;
	local_t				entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer, cpu);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
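
/*
 * Illustrative usage (not in the original source): a reader taking a
 * raw buffer timestamp and normalizing it before reporting.  With
 * DEBUG_SHIFT at 0 both calls are effectively pass-throughs.
 */
static __maybe_unused u64 rb_example_read_ts(struct ring_buffer *buffer,
					     int cpu)
{
	u64 ts = ring_buffer_time_stamp(buffer, cpu);

	ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
	return ts;
}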

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
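
/*
 * Illustrative usage (not in the original source): allocating a
 * one-megabyte-per-cpu overwriting buffer.  ring_buffer_alloc() is
 * the wrapper macro from linux/ring_buffer.h that supplies the lock
 * class key to __ring_buffer_alloc().
 */
static __maybe_unused struct ring_buffer *rb_example_alloc(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return NULL;	/* cpumask or per cpu allocation failed */

	return buffer;
}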

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
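
/*
 * Illustrative usage (not in the original source): growing a buffer
 * to two megabytes per cpu.  The request is rounded up to a multiple
 * of BUF_PAGE_SIZE and the resulting size (or -1) is returned.
 */
static __maybe_unused int rb_example_grow(struct ring_buffer *buffer)
{
	int ret = ring_buffer_resize(buffer, 2 << 20);

	return ret < 0 ? -ENOMEM : 0;
}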

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}
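
/*
 * Illustrative check (not in the original source): index 0 is the
 * first byte of event data on a page.  Since BUF_PAGE_SIZE is
 * PAGE_SIZE - BUF_PAGE_HDR_SIZE, the (PAGE_SIZE - BUF_PAGE_SIZE)
 * term above is exactly the page header that precedes the data.
 */
static __maybe_unused void rb_example_event_index(struct buffer_page *bpage)
{
	struct ring_buffer_event *event = __rb_page_index(bpage, 0);

	WARN_ON(rb_event_index(event) != 0);
}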

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
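
/*
 * Illustrative numbers (not in the original source), assuming the
 * 4-byte event header: a 6-byte payload rounds up to 8 bytes of data
 * plus the header, 12 bytes total; a 200-byte payload exceeds
 * RB_MAX_SMALL_DATA (112) and pays 4 more bytes for the explicit
 * length word in array[0], 208 bytes total.
 */
static __maybe_unused void rb_example_event_length(void)
{
	WARN_ON(rb_calculate_event_length(6) != 12);
	WARN_ON(rb_calculate_event_length(200) != 208);
}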
1172
Steven Rostedt6634ff22009-05-06 15:30:07 -04001173
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001174static struct ring_buffer_event *
Steven Rostedt6634ff22009-05-06 15:30:07 -04001175rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1176 unsigned long length, unsigned long tail,
1177 struct buffer_page *commit_page,
1178 struct buffer_page *tail_page, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001179{
Steven Rostedt6634ff22009-05-06 15:30:07 -04001180 struct buffer_page *next_page, *head_page, *reader_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001181 struct ring_buffer *buffer = cpu_buffer->buffer;
1182 struct ring_buffer_event *event;
Steven Rostedt78d904b2009-02-05 18:43:07 -05001183 bool lock_taken = false;
Steven Rostedt6634ff22009-05-06 15:30:07 -04001184 unsigned long flags;
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001185
1186 next_page = tail_page;
1187
1188 local_irq_save(flags);
1189 /*
1190 * Since the write to the buffer is still not
1191 * fully lockless, we must be careful with NMIs.
1192 * The locks in the writers are taken when a write
1193 * crosses to a new page. The locks protect against
1194 * races with the readers (this will soon be fixed
1195 * with a lockless solution).
1196 *
1197 * Because we can not protect against NMIs, and we
1198 * want to keep traces reentrant, we need to manage
1199 * what happens when we are in an NMI.
1200 *
1201 * NMIs can happen after we take the lock.
1202 * If we are in an NMI, only take the lock
1203 * if it is not already taken. Otherwise
1204 * simply fail.
1205 */
1206 if (unlikely(in_nmi())) {
1207 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1208 cpu_buffer->nmi_dropped++;
1209 goto out_reset;
1210 }
1211 } else
1212 __raw_spin_lock(&cpu_buffer->lock);
1213
1214 lock_taken = true;
1215
1216 rb_inc_page(cpu_buffer, &next_page);
1217
1218 head_page = cpu_buffer->head_page;
1219 reader_page = cpu_buffer->reader_page;
1220
1221 /* we grabbed the lock before incrementing */
1222 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1223 goto out_reset;
1224
1225 /*
1226 * If for some reason, we had an interrupt storm that made
1227 * it all the way around the buffer, bail, and warn
1228 * about it.
1229 */
1230 if (unlikely(next_page == commit_page)) {
1231 cpu_buffer->commit_overrun++;
1232 goto out_reset;
1233 }
1234
1235 if (next_page == head_page) {
1236 if (!(buffer->flags & RB_FL_OVERWRITE))
1237 goto out_reset;
1238
1239 /* tail_page has not moved yet? */
1240 if (tail_page == cpu_buffer->tail_page) {
1241 /* count overflows */
1242 cpu_buffer->overrun +=
1243 local_read(&head_page->entries);
1244
1245 rb_inc_page(cpu_buffer, &head_page);
1246 cpu_buffer->head_page = head_page;
1247 cpu_buffer->head_page->read = 0;
1248 }
1249 }
1250
1251 /*
1252 * If the tail page is still the same as what we think
1253 * it is, then it is up to us to update the tail
1254 * pointer.
1255 */
1256 if (tail_page == cpu_buffer->tail_page) {
1257 local_set(&next_page->write, 0);
1258 local_set(&next_page->entries, 0);
1259 local_set(&next_page->page->commit, 0);
1260 cpu_buffer->tail_page = next_page;
1261
1262 /* reread the time stamp */
Steven Rostedt88eb0122009-05-11 16:28:23 -04001263 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001264 cpu_buffer->tail_page->page->time_stamp = *ts;
1265 }
1266
1267 /*
1268 * The actual tail page has moved forward.
1269 */
1270 if (tail < BUF_PAGE_SIZE) {
1271 /* Mark the rest of the page with padding */
1272 event = __rb_page_index(tail_page, tail);
1273 rb_event_set_padding(event);
1274 }
1275
Steven Rostedt8e7abf12009-05-06 10:26:45 -04001276 /* Set the write back to the previous setting */
1277 local_sub(length, &tail_page->write);
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001278
1279 /*
1280 * If this was a commit entry that failed,
1281 * increment that too
1282 */
1283 if (tail_page == cpu_buffer->commit_page &&
1284 tail == rb_commit_index(cpu_buffer)) {
1285 rb_set_commit_to_write(cpu_buffer);
1286 }
1287
1288 __raw_spin_unlock(&cpu_buffer->lock);
1289 local_irq_restore(flags);
1290
1291 /* fail and let the caller try again */
1292 return ERR_PTR(-EAGAIN);
1293
Steven Rostedt45141d42009-02-12 13:19:48 -05001294 out_reset:
Lai Jiangshan6f3b3442009-01-12 11:06:18 +08001295 /* reset write */
Steven Rostedt8e7abf12009-05-06 10:26:45 -04001296 local_sub(length, &tail_page->write);
Lai Jiangshan6f3b3442009-01-12 11:06:18 +08001297
Steven Rostedt78d904b2009-02-05 18:43:07 -05001298 if (likely(lock_taken))
1299 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001300 local_irq_restore(flags);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001301 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001302}
1303
Steven Rostedt6634ff22009-05-06 15:30:07 -04001304static struct ring_buffer_event *
1305__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1306 unsigned type, unsigned long length, u64 *ts)
1307{
1308 struct buffer_page *tail_page, *commit_page;
1309 struct ring_buffer_event *event;
1310 unsigned long tail, write;
1311
1312 commit_page = cpu_buffer->commit_page;
1313 /* we just need to protect against interrupts */
1314 barrier();
1315 tail_page = cpu_buffer->tail_page;
1316 write = local_add_return(length, &tail_page->write);
1317 tail = write - length;
1318
1319 /* See if we shot pass the end of this buffer page */
1320 if (write > BUF_PAGE_SIZE)
1321 return rb_move_tail(cpu_buffer, length, tail,
1322 commit_page, tail_page, ts);
1323
1324 /* We reserved something on the buffer */
1325
1326 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1327 return NULL;
1328
1329 event = __rb_page_index(tail_page, tail);
1330 rb_update_event(event, type, length);
1331
1332 /* The passed in type is zero for DATA */
1333 if (likely(!type))
1334 local_inc(&tail_page->entries);
1335
1336 /*
1337 * If this is a commit and the tail is zero, then update
1338 * this page's time stamp.
1339 */
1340 if (!tail && rb_is_commit(cpu_buffer, event))
1341 cpu_buffer->commit_page->page->time_stamp = *ts;
1342
1343 return event;
1344}
1345
Steven Rostedtedd813b2009-06-02 23:00:53 -04001346static inline int
1347rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1348 struct ring_buffer_event *event)
1349{
1350 unsigned long new_index, old_index;
1351 struct buffer_page *bpage;
1352 unsigned long index;
1353 unsigned long addr;
1354
1355 new_index = rb_event_index(event);
1356 old_index = new_index + rb_event_length(event);
1357 addr = (unsigned long)event;
1358 addr &= PAGE_MASK;
1359
1360 bpage = cpu_buffer->tail_page;
1361
1362 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1363 /*
1364 * This is on the tail page. It is possible that
1365 * a write could come in and move the tail page
1366 * and write to the next page. That is fine
1367 * because we just shorten what is on this page.
1368 */
1369 index = local_cmpxchg(&bpage->write, old_index, new_index);
1370 if (index == old_index)
1371 return 1;
1372 }
1373
1374 /* could not discard */
1375 return 0;
1376}
1377
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001378static int
1379rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1380 u64 *ts, u64 *delta)
1381{
1382 struct ring_buffer_event *event;
1383 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001384 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001385
1386 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1387 printk(KERN_WARNING "Delta way too big! %llu"
1388 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +11001389 (unsigned long long)*delta,
1390 (unsigned long long)*ts,
1391 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001392 WARN_ON(1);
1393 }
1394
1395 /*
1396 * The delta is too big, we to add a
1397 * new timestamp.
1398 */
1399 event = __rb_reserve_next(cpu_buffer,
1400 RINGBUF_TYPE_TIME_EXTEND,
1401 RB_LEN_TIME_EXTEND,
1402 ts);
1403 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001404 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001405
Steven Rostedtbf41a152008-10-04 02:00:59 -04001406 if (PTR_ERR(event) == -EAGAIN)
1407 return -EAGAIN;
1408
1409 /* Only a commited time event can update the write stamp */
1410 if (rb_is_commit(cpu_buffer, event)) {
1411 /*
1412 * If this is the first on the page, then we need to
1413 * update the page itself, and just put in a zero.
1414 */
1415 if (rb_event_index(event)) {
1416 event->time_delta = *delta & TS_MASK;
1417 event->array[0] = *delta >> TS_SHIFT;
1418 } else {
Steven Rostedtabc9b562008-12-02 15:34:06 -05001419 cpu_buffer->commit_page->page->time_stamp = *ts;
Steven Rostedtea05b572009-06-03 09:30:10 -04001420 /* try to discard, since we do not need this */
1421 if (!rb_try_to_discard(cpu_buffer, event)) {
1422 /* nope, just zero it */
1423 event->time_delta = 0;
1424 event->array[0] = 0;
1425 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001426 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001427 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001428 /* let the caller know this was the commit */
1429 ret = 1;
1430 } else {
Steven Rostedtedd813b2009-06-02 23:00:53 -04001431 /* Try to discard the event */
1432 if (!rb_try_to_discard(cpu_buffer, event)) {
1433 /* Darn, this is just wasted space */
1434 event->time_delta = 0;
1435 event->array[0] = 0;
Steven Rostedtedd813b2009-06-02 23:00:53 -04001436 }
Steven Rostedtf57a8a12009-06-05 14:11:30 -04001437 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001438 }
1439
Steven Rostedtbf41a152008-10-04 02:00:59 -04001440 *delta = 0;
1441
1442 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001443}
1444
1445static struct ring_buffer_event *
1446rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001447 unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001448{
1449 struct ring_buffer_event *event;
Steven Rostedt168b6b12009-05-11 22:11:05 -04001450 u64 ts, delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001451 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001452 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001453
Steven Rostedtbe957c42009-05-11 14:42:53 -04001454 length = rb_calculate_event_length(length);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001455 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001456 /*
1457 * We allow for interrupts to reenter here and do a trace.
1458 * If one does, it will cause this original code to loop
1459 * back here. Even with heavy interrupts happening, this
1460 * should only happen a few times in a row. If this happens
1461 * 1000 times in a row, there must be either an interrupt
1462 * storm or we have something buggy.
1463 * Bail!
1464 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001465 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001466 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001467
Steven Rostedt88eb0122009-05-11 16:28:23 -04001468 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001469
Steven Rostedtbf41a152008-10-04 02:00:59 -04001470 /*
1471 * Only the first commit can update the timestamp.
1472 * Yes there is a race here. If an interrupt comes in
1473 * just after the conditional and it traces too, then it
1474 * will also check the deltas. More than one timestamp may
1475 * also be made. But only the entry that did the actual
1476 * commit will be something other than zero.
1477 */
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04001478 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1479 rb_page_write(cpu_buffer->tail_page) ==
1480 rb_commit_index(cpu_buffer))) {
Steven Rostedt168b6b12009-05-11 22:11:05 -04001481 u64 diff;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001482
Steven Rostedt168b6b12009-05-11 22:11:05 -04001483 diff = ts - cpu_buffer->write_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001484
Steven Rostedt168b6b12009-05-11 22:11:05 -04001485 /* make sure this diff is calculated here */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001486 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001487
Steven Rostedtbf41a152008-10-04 02:00:59 -04001488 /* Did the write stamp get updated already? */
1489 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt168b6b12009-05-11 22:11:05 -04001490 goto get_event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001491
Steven Rostedt168b6b12009-05-11 22:11:05 -04001492 delta = diff;
1493 if (unlikely(test_time_stamp(delta))) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001494
1495 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001496 if (commit == -EBUSY)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001497 return NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001498
1499 if (commit == -EAGAIN)
1500 goto again;
1501
1502 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001503 }
Steven Rostedt168b6b12009-05-11 22:11:05 -04001504 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001505
Steven Rostedt168b6b12009-05-11 22:11:05 -04001506 get_event:
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001507 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
Steven Rostedt168b6b12009-05-11 22:11:05 -04001508 if (unlikely(PTR_ERR(event) == -EAGAIN))
Steven Rostedtbf41a152008-10-04 02:00:59 -04001509 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001510
Steven Rostedtbf41a152008-10-04 02:00:59 -04001511 if (!event) {
1512 if (unlikely(commit))
1513 /*
 1514		 * Ouch! We needed a timestamp and it was committed. But
1515 * we didn't get our event reserved.
1516 */
1517 rb_set_commit_to_write(cpu_buffer);
1518 return NULL;
1519 }
1520
1521 /*
 1522	 * If the timestamp was committed, make the commit our entry
1523 * now so that we will update it when needed.
1524 */
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04001525 if (unlikely(commit))
Steven Rostedtbf41a152008-10-04 02:00:59 -04001526 rb_set_commit_event(cpu_buffer, event);
1527 else if (!rb_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001528 delta = 0;
1529
1530 event->time_delta = delta;
1531
1532 return event;
1533}
1534
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001535#define TRACE_RECURSIVE_DEPTH 16
Steven Rostedt261842b2009-04-16 21:41:52 -04001536
1537static int trace_recursive_lock(void)
1538{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001539 current->trace_recursion++;
Steven Rostedt261842b2009-04-16 21:41:52 -04001540
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001541 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1542 return 0;
Steven Rostedt261842b2009-04-16 21:41:52 -04001543
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001544 /* Disable all tracing before we do anything else */
1545 tracing_off_permanent();
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02001546
Steven Rostedt7d7d2b82009-04-27 12:37:49 -04001547 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001548 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1549 current->trace_recursion,
1550 hardirq_count() >> HARDIRQ_SHIFT,
1551 softirq_count() >> SOFTIRQ_SHIFT,
1552 in_nmi());
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02001553
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001554 WARN_ON_ONCE(1);
1555 return -1;
Steven Rostedt261842b2009-04-16 21:41:52 -04001556}
1557
1558static void trace_recursive_unlock(void)
1559{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001560 WARN_ON_ONCE(!current->trace_recursion);
Steven Rostedt261842b2009-04-16 21:41:52 -04001561
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001562 current->trace_recursion--;
Steven Rostedt261842b2009-04-16 21:41:52 -04001563}
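
/*
 * The guard above in miniature (a hedged sketch, not part of this
 * file): a plain per-context counter that allows a bounded number of
 * nested entries and refuses anything deeper. All names here are
 * hypothetical.
 */
#if 0	/* illustration only */
#define MY_RECURSIVE_DEPTH 16

static int my_recursion;	/* one counter per context in real code */

static int my_recursive_lock(void)
{
	if (++my_recursion < MY_RECURSIVE_DEPTH)
		return 0;
	return -1;		/* too deep: caller must back out */
}

static void my_recursive_unlock(void)
{
	my_recursion--;
}
#endif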
1564
Steven Rostedtbf41a152008-10-04 02:00:59 -04001565static DEFINE_PER_CPU(int, rb_need_resched);
1566
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001567/**
1568 * ring_buffer_lock_reserve - reserve a part of the buffer
1569 * @buffer: the ring buffer to reserve from
1570 * @length: the length of the data to reserve (excluding event header)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001571 *
 1572 * Returns a reserved event on the ring buffer to copy data directly into.
1573 * The user of this interface will need to get the body to write into
1574 * and can use the ring_buffer_event_data() interface.
1575 *
1576 * The length is the length of the data needed, not the event length
1577 * which also includes the event header.
1578 *
1579 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1580 * If NULL is returned, then nothing has been allocated or locked.
1581 */
1582struct ring_buffer_event *
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001583ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001584{
1585 struct ring_buffer_per_cpu *cpu_buffer;
1586 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001587 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001588
Steven Rostedt033601a2008-11-21 12:41:55 -05001589 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001590 return NULL;
1591
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001592 if (atomic_read(&buffer->record_disabled))
1593 return NULL;
1594
Steven Rostedtbf41a152008-10-04 02:00:59 -04001595 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001596 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001597
Steven Rostedt261842b2009-04-16 21:41:52 -04001598 if (trace_recursive_lock())
1599 goto out_nocheck;
1600
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001601 cpu = raw_smp_processor_id();
1602
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301603 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001604 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001605
1606 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001607
1608 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001609 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001610
Steven Rostedtbe957c42009-05-11 14:42:53 -04001611 if (length > BUF_MAX_DATA_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001612 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001613
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001614 event = rb_reserve_next_event(cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001615 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001616 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001617
Steven Rostedtbf41a152008-10-04 02:00:59 -04001618 /*
1619 * Need to store resched state on this cpu.
1620 * Only the first needs to.
1621 */
1622
1623 if (preempt_count() == 1)
1624 per_cpu(rb_need_resched, cpu) = resched;
1625
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001626 return event;
1627
Steven Rostedtd7690412008-10-01 00:29:53 -04001628 out:
Steven Rostedt261842b2009-04-16 21:41:52 -04001629 trace_recursive_unlock();
1630
1631 out_nocheck:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001632 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001633 return NULL;
1634}
Robert Richterc4f50182008-12-11 16:49:22 +01001635EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001636
1637static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1638 struct ring_buffer_event *event)
1639{
Steven Rostedte4906ef2009-04-30 20:49:44 -04001640 local_inc(&cpu_buffer->entries);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001641
1642 /* Only process further if we own the commit */
1643 if (!rb_is_commit(cpu_buffer, event))
1644 return;
1645
1646 cpu_buffer->write_stamp += event->time_delta;
1647
1648 rb_set_commit_to_write(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001649}
1650
1651/**
 1652 * ring_buffer_unlock_commit - commit a reserved event
1653 * @buffer: The buffer to commit to
1654 * @event: The event pointer to commit.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001655 *
1656 * This commits the data to the ring buffer, and releases any locks held.
1657 *
1658 * Must be paired with ring_buffer_lock_reserve.
1659 */
1660int ring_buffer_unlock_commit(struct ring_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001661 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001662{
1663 struct ring_buffer_per_cpu *cpu_buffer;
1664 int cpu = raw_smp_processor_id();
1665
1666 cpu_buffer = buffer->buffers[cpu];
1667
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001668 rb_commit(cpu_buffer, event);
1669
Steven Rostedt261842b2009-04-16 21:41:52 -04001670 trace_recursive_unlock();
1671
Steven Rostedtbf41a152008-10-04 02:00:59 -04001672 /*
1673 * Only the last preempt count needs to restore preemption.
1674 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001675 if (preempt_count() == 1)
1676 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1677 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001678 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001679
1680 return 0;
1681}
Robert Richterc4f50182008-12-11 16:49:22 +01001682EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
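
/*
 * A typical reserve/commit round trip (a hedged usage sketch, not part
 * of this file): reserve room for a u64 payload, fill in the body
 * returned by ring_buffer_event_data(), then commit.
 */
#if 0	/* illustration only */
static int example_write(struct ring_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = value;

	return ring_buffer_unlock_commit(buffer, event);
}
#endif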
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001683
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001684static inline void rb_event_discard(struct ring_buffer_event *event)
1685{
Lai Jiangshan334d4162009-04-24 11:27:05 +08001686 /* array[0] holds the actual length for the discarded event */
1687 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1688 event->type_len = RINGBUF_TYPE_PADDING;
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001689 /* time delta must be non zero */
1690 if (!event->time_delta)
1691 event->time_delta = 1;
1692}
1693
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001694/**
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001695 * ring_buffer_event_discard - discard any event in the ring buffer
1696 * @event: the event to discard
1697 *
 1698 * Sometimes an event that is in the ring buffer needs to be ignored.
1699 * This function lets the user discard an event in the ring buffer
1700 * and then that event will not be read later.
1701 *
1702 * Note, it is up to the user to be careful with this, and protect
1703 * against races. If the user discards an event that has been consumed
1704 * it is possible that it could corrupt the ring buffer.
1705 */
1706void ring_buffer_event_discard(struct ring_buffer_event *event)
1707{
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001708 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001709}
1710EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1711
1712/**
1713 * ring_buffer_commit_discard - discard an event that has not been committed
1714 * @buffer: the ring buffer
1715 * @event: non committed event to discard
1716 *
1717 * This is similar to ring_buffer_event_discard but must only be
1718 * performed on an event that has not been committed yet. The difference
1719 * is that this will also try to free the event from the ring buffer
1720 * if another event has not been added behind it.
1721 *
1722 * If another event has been added behind it, it will set the event
1723 * up as discarded, and perform the commit.
1724 *
1725 * If this function is called, do not call ring_buffer_unlock_commit on
1726 * the event.
1727 */
1728void ring_buffer_discard_commit(struct ring_buffer *buffer,
1729 struct ring_buffer_event *event)
1730{
1731 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001732 int cpu;
1733
1734 /* The event is discarded regardless */
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001735 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001736
1737 /*
1738 * This must only be called if the event has not been
1739 * committed yet. Thus we can assume that preemption
1740 * is still disabled.
1741 */
Steven Rostedt74f4fd22009-05-07 19:58:55 -04001742 RB_WARN_ON(buffer, preemptible());
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001743
1744 cpu = smp_processor_id();
1745 cpu_buffer = buffer->buffers[cpu];
1746
Steven Rostedtedd813b2009-06-02 23:00:53 -04001747 if (!rb_try_to_discard(cpu_buffer, event))
1748 goto out;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001749
1750 /*
1751 * The commit is still visible by the reader, so we
1752 * must increment entries.
1753 */
Steven Rostedte4906ef2009-04-30 20:49:44 -04001754 local_inc(&cpu_buffer->entries);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001755 out:
1756 /*
1757 * If a write came in and pushed the tail page
1758 * we still need to update the commit pointer
1759 * if we were the commit.
1760 */
1761 if (rb_is_commit(cpu_buffer, event))
1762 rb_set_commit_to_write(cpu_buffer);
1763
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001764 trace_recursive_unlock();
1765
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001766 /*
1767 * Only the last preempt count needs to restore preemption.
1768 */
1769 if (preempt_count() == 1)
1770 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1771 else
1772 preempt_enable_no_resched_notrace();
1773
1774}
1775EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
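
/*
 * A hedged sketch of the discard path: reserve as usual, and if the
 * event turns out to be unwanted (say, a filter rejects it), call
 * ring_buffer_discard_commit() instead of ring_buffer_unlock_commit().
 * The filter predicate here is hypothetical.
 */
#if 0	/* illustration only */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (event) {
		body = ring_buffer_event_data(event);
		*body = value;
		if (my_filter_rejects(body))
			ring_buffer_discard_commit(buffer, event);
		else
			ring_buffer_unlock_commit(buffer, event);
	}
#endif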
1776
1777/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001778 * ring_buffer_write - write data to the buffer without reserving
1779 * @buffer: The ring buffer to write to.
1780 * @length: The length of the data being written (excluding the event header)
1781 * @data: The data to write to the buffer.
1782 *
1783 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1784 * one function. If you already have the data to write to the buffer, it
1785 * may be easier to simply call this function.
1786 *
1787 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1788 * and not the length of the event which would hold the header.
1789 */
1790int ring_buffer_write(struct ring_buffer *buffer,
1791 unsigned long length,
1792 void *data)
1793{
1794 struct ring_buffer_per_cpu *cpu_buffer;
1795 struct ring_buffer_event *event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001796 void *body;
1797 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001798 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001799
Steven Rostedt033601a2008-11-21 12:41:55 -05001800 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001801 return -EBUSY;
1802
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001803 if (atomic_read(&buffer->record_disabled))
1804 return -EBUSY;
1805
Steven Rostedt182e9f52008-11-03 23:15:56 -05001806 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001807
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001808 cpu = raw_smp_processor_id();
1809
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301810 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001811 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001812
1813 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001814
1815 if (atomic_read(&cpu_buffer->record_disabled))
1816 goto out;
1817
Steven Rostedtbe957c42009-05-11 14:42:53 -04001818 if (length > BUF_MAX_DATA_SIZE)
1819 goto out;
1820
1821 event = rb_reserve_next_event(cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001822 if (!event)
1823 goto out;
1824
1825 body = rb_event_data(event);
1826
1827 memcpy(body, data, length);
1828
1829 rb_commit(cpu_buffer, event);
1830
1831 ret = 0;
1832 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001833 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001834
1835 return ret;
1836}
Robert Richterc4f50182008-12-11 16:49:22 +01001837EXPORT_SYMBOL_GPL(ring_buffer_write);
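
/*
 * When the payload already exists, ring_buffer_write() collapses the
 * reserve/copy/commit sequence into one call (a hedged sketch; the
 * record type is hypothetical):
 */
#if 0	/* illustration only */
	struct my_record rec = { .value = 42 };

	if (ring_buffer_write(buffer, sizeof(rec), &rec))
		/* -EBUSY: recording disabled or no room */;
#endif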
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001838
Andrew Morton34a148b2009-01-09 12:27:09 -08001839static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001840{
1841 struct buffer_page *reader = cpu_buffer->reader_page;
1842 struct buffer_page *head = cpu_buffer->head_page;
1843 struct buffer_page *commit = cpu_buffer->commit_page;
1844
1845 return reader->read == rb_page_commit(reader) &&
1846 (commit == reader ||
1847 (commit == head &&
1848 head->read == rb_page_commit(commit)));
1849}
1850
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001851/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001852 * ring_buffer_record_disable - stop all writes into the buffer
1853 * @buffer: The ring buffer to stop writes to.
1854 *
1855 * This prevents all writes to the buffer. Any attempt to write
1856 * to the buffer after this will fail and return NULL.
1857 *
1858 * The caller should call synchronize_sched() after this.
1859 */
1860void ring_buffer_record_disable(struct ring_buffer *buffer)
1861{
1862 atomic_inc(&buffer->record_disabled);
1863}
Robert Richterc4f50182008-12-11 16:49:22 +01001864EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
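
/*
 * The usual pattern around the disable above (a hedged sketch): stop
 * the writers, wait for any writer already inside the buffer to
 * finish, then read or reset safely, and re-enable when done.
 */
#if 0	/* illustration only */
	ring_buffer_record_disable(buffer);
	synchronize_sched();		/* writers run with preemption off */
	/* ... read or reset the buffer ... */
	ring_buffer_record_enable(buffer);
#endif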
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001865
1866/**
1867 * ring_buffer_record_enable - enable writes to the buffer
1868 * @buffer: The ring buffer to enable writes
1869 *
1870 * Note, multiple disables will need the same number of enables
 1871 * to truly enable the writing (much like preempt_disable).
1872 */
1873void ring_buffer_record_enable(struct ring_buffer *buffer)
1874{
1875 atomic_dec(&buffer->record_disabled);
1876}
Robert Richterc4f50182008-12-11 16:49:22 +01001877EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001878
1879/**
1880 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1881 * @buffer: The ring buffer to stop writes to.
1882 * @cpu: The CPU buffer to stop
1883 *
1884 * This prevents all writes to the buffer. Any attempt to write
1885 * to the buffer after this will fail and return NULL.
1886 *
1887 * The caller should call synchronize_sched() after this.
1888 */
1889void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1890{
1891 struct ring_buffer_per_cpu *cpu_buffer;
1892
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301893 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001894 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001895
1896 cpu_buffer = buffer->buffers[cpu];
1897 atomic_inc(&cpu_buffer->record_disabled);
1898}
Robert Richterc4f50182008-12-11 16:49:22 +01001899EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001900
1901/**
1902 * ring_buffer_record_enable_cpu - enable writes to the buffer
1903 * @buffer: The ring buffer to enable writes
1904 * @cpu: The CPU to enable.
1905 *
1906 * Note, multiple disables will need the same number of enables
 1907 * to truly enable the writing (much like preempt_disable).
1908 */
1909void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1910{
1911 struct ring_buffer_per_cpu *cpu_buffer;
1912
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301913 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001914 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001915
1916 cpu_buffer = buffer->buffers[cpu];
1917 atomic_dec(&cpu_buffer->record_disabled);
1918}
Robert Richterc4f50182008-12-11 16:49:22 +01001919EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001920
1921/**
1922 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1923 * @buffer: The ring buffer
1924 * @cpu: The per CPU buffer to get the entries from.
1925 */
1926unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1927{
1928 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001929 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001930
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301931 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001932 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001933
1934 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04001935 ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1936 - cpu_buffer->read;
Steven Rostedt554f7862009-03-11 22:00:13 -04001937
1938 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001939}
Robert Richterc4f50182008-12-11 16:49:22 +01001940EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
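
/*
 * e.g. (hypothetical numbers) a cpu buffer that has written 1000
 * events, lost 100 to overrun and already had 250 consumed reports
 * 1000 - 100 - 250 == 650 entries still waiting to be read.
 */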
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001941
1942/**
1943 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1944 * @buffer: The ring buffer
1945 * @cpu: The per CPU buffer to get the number of overruns from
1946 */
1947unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1948{
1949 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001950 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001951
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301952 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001953 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001954
1955 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001956 ret = cpu_buffer->overrun;
Steven Rostedt554f7862009-03-11 22:00:13 -04001957
1958 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001959}
Robert Richterc4f50182008-12-11 16:49:22 +01001960EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001961
1962/**
Steven Rostedtf0d2c682009-04-29 13:43:37 -04001963 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
1964 * @buffer: The ring buffer
1965 * @cpu: The per CPU buffer to get the number of overruns from
1966 */
1967unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
1968{
1969 struct ring_buffer_per_cpu *cpu_buffer;
1970 unsigned long ret;
1971
1972 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1973 return 0;
1974
1975 cpu_buffer = buffer->buffers[cpu];
1976 ret = cpu_buffer->nmi_dropped;
1977
1978 return ret;
1979}
1980EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
1981
1982/**
1983 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
1984 * @buffer: The ring buffer
1985 * @cpu: The per CPU buffer to get the number of overruns from
1986 */
1987unsigned long
1988ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
1989{
1990 struct ring_buffer_per_cpu *cpu_buffer;
1991 unsigned long ret;
1992
1993 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1994 return 0;
1995
1996 cpu_buffer = buffer->buffers[cpu];
1997 ret = cpu_buffer->commit_overrun;
1998
1999 return ret;
2000}
2001EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2002
2003/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002004 * ring_buffer_entries - get the number of entries in a buffer
2005 * @buffer: The ring buffer
2006 *
2007 * Returns the total number of entries in the ring buffer
2008 * (all CPU entries)
2009 */
2010unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2011{
2012 struct ring_buffer_per_cpu *cpu_buffer;
2013 unsigned long entries = 0;
2014 int cpu;
2015
2016 /* if you care about this being correct, lock the buffer */
2017 for_each_buffer_cpu(buffer, cpu) {
2018 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04002019 entries += (local_read(&cpu_buffer->entries) -
2020 cpu_buffer->overrun) - cpu_buffer->read;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002021 }
2022
2023 return entries;
2024}
Robert Richterc4f50182008-12-11 16:49:22 +01002025EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002026
2027/**
 2028 * ring_buffer_overruns - get the total number of overruns in the buffer
2029 * @buffer: The ring buffer
2030 *
2031 * Returns the total number of overruns in the ring buffer
2032 * (all CPU entries)
2033 */
2034unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2035{
2036 struct ring_buffer_per_cpu *cpu_buffer;
2037 unsigned long overruns = 0;
2038 int cpu;
2039
2040 /* if you care about this being correct, lock the buffer */
2041 for_each_buffer_cpu(buffer, cpu) {
2042 cpu_buffer = buffer->buffers[cpu];
2043 overruns += cpu_buffer->overrun;
2044 }
2045
2046 return overruns;
2047}
Robert Richterc4f50182008-12-11 16:49:22 +01002048EXPORT_SYMBOL_GPL(ring_buffer_overruns);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002049
Steven Rostedt642edba2008-11-12 00:01:26 -05002050static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002051{
2052 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2053
Steven Rostedtd7690412008-10-01 00:29:53 -04002054 /* Iterator usage is expected to have record disabled */
2055 if (list_empty(&cpu_buffer->reader_page->list)) {
2056 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002057 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002058 } else {
2059 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002060 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002061 }
2062 if (iter->head)
2063 iter->read_stamp = cpu_buffer->read_stamp;
2064 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05002065 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05002066}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002067
Steven Rostedt642edba2008-11-12 00:01:26 -05002068/**
2069 * ring_buffer_iter_reset - reset an iterator
2070 * @iter: The iterator to reset
2071 *
2072 * Resets the iterator, so that it will start from the beginning
2073 * again.
2074 */
2075void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2076{
Steven Rostedt554f7862009-03-11 22:00:13 -04002077 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt642edba2008-11-12 00:01:26 -05002078 unsigned long flags;
2079
Steven Rostedt554f7862009-03-11 22:00:13 -04002080 if (!iter)
2081 return;
2082
2083 cpu_buffer = iter->cpu_buffer;
2084
Steven Rostedt642edba2008-11-12 00:01:26 -05002085 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2086 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002087 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002088}
Robert Richterc4f50182008-12-11 16:49:22 +01002089EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002090
2091/**
2092 * ring_buffer_iter_empty - check if an iterator has no more to read
2093 * @iter: The iterator to check
2094 */
2095int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2096{
2097 struct ring_buffer_per_cpu *cpu_buffer;
2098
2099 cpu_buffer = iter->cpu_buffer;
2100
Steven Rostedtbf41a152008-10-04 02:00:59 -04002101 return iter->head_page == cpu_buffer->commit_page &&
2102 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002103}
Robert Richterc4f50182008-12-11 16:49:22 +01002104EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002105
2106static void
2107rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2108 struct ring_buffer_event *event)
2109{
2110 u64 delta;
2111
Lai Jiangshan334d4162009-04-24 11:27:05 +08002112 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002113 case RINGBUF_TYPE_PADDING:
2114 return;
2115
2116 case RINGBUF_TYPE_TIME_EXTEND:
2117 delta = event->array[0];
2118 delta <<= TS_SHIFT;
2119 delta += event->time_delta;
2120 cpu_buffer->read_stamp += delta;
2121 return;
2122
2123 case RINGBUF_TYPE_TIME_STAMP:
2124 /* FIXME: not implemented */
2125 return;
2126
2127 case RINGBUF_TYPE_DATA:
2128 cpu_buffer->read_stamp += event->time_delta;
2129 return;
2130
2131 default:
2132 BUG();
2133 }
2134 return;
2135}
2136
2137static void
2138rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2139 struct ring_buffer_event *event)
2140{
2141 u64 delta;
2142
Lai Jiangshan334d4162009-04-24 11:27:05 +08002143 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002144 case RINGBUF_TYPE_PADDING:
2145 return;
2146
2147 case RINGBUF_TYPE_TIME_EXTEND:
2148 delta = event->array[0];
2149 delta <<= TS_SHIFT;
2150 delta += event->time_delta;
2151 iter->read_stamp += delta;
2152 return;
2153
2154 case RINGBUF_TYPE_TIME_STAMP:
2155 /* FIXME: not implemented */
2156 return;
2157
2158 case RINGBUF_TYPE_DATA:
2159 iter->read_stamp += event->time_delta;
2160 return;
2161
2162 default:
2163 BUG();
2164 }
2165 return;
2166}
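
/*
 * A worked sketch of the time-extend decoding in the two helpers
 * above: the extend event carries the upper bits of the delta in
 * array[0] and the lower bits in time_delta, so the full delta is
 * reassembled as
 *
 *	delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 *
 * With a 27-bit time_delta field (so TS_SHIFT == 27), array[0] == 2
 * and time_delta == 5 reassemble to delta == 2 * 2^27 + 5 ==
 * 268435461 clock units.
 */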
2167
Steven Rostedtd7690412008-10-01 00:29:53 -04002168static struct buffer_page *
2169rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002170{
Steven Rostedtd7690412008-10-01 00:29:53 -04002171 struct buffer_page *reader = NULL;
2172 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002173 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002174
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002175 local_irq_save(flags);
2176 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04002177
2178 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002179 /*
2180 * This should normally only loop twice. But because the
2181 * start of the reader inserts an empty page, it causes
2182 * a case where we will loop three times. There should be no
2183 * reason to loop four times (that I know of).
2184 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002185 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002186 reader = NULL;
2187 goto out;
2188 }
2189
Steven Rostedtd7690412008-10-01 00:29:53 -04002190 reader = cpu_buffer->reader_page;
2191
2192 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002193 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04002194 goto out;
2195
2196 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002197 if (RB_WARN_ON(cpu_buffer,
2198 cpu_buffer->reader_page->read > rb_page_size(reader)))
2199 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04002200
2201 /* check if we caught up to the tail */
2202 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002203 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04002204 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002205
2206 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04002207 * Splice the empty reader page into the list around the head.
2208 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002209 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002210
Steven Rostedtd7690412008-10-01 00:29:53 -04002211 reader = cpu_buffer->head_page;
2212 cpu_buffer->reader_page->list.next = reader->list.next;
2213 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002214
2215 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002216 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002217 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04002218
2219 /* Make the reader page now replace the head */
2220 reader->list.prev->next = &cpu_buffer->reader_page->list;
2221 reader->list.next->prev = &cpu_buffer->reader_page->list;
2222
2223 /*
2224 * If the tail is on the reader, then we must set the head
2225 * to the inserted page, otherwise we set it one before.
2226 */
2227 cpu_buffer->head_page = cpu_buffer->reader_page;
2228
Steven Rostedtbf41a152008-10-04 02:00:59 -04002229 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04002230 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2231
2232 /* Finally update the reader page to the new head */
2233 cpu_buffer->reader_page = reader;
2234 rb_reset_reader_page(cpu_buffer);
2235
2236 goto again;
2237
2238 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002239 __raw_spin_unlock(&cpu_buffer->lock);
2240 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04002241
2242 return reader;
2243}
2244
2245static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2246{
2247 struct ring_buffer_event *event;
2248 struct buffer_page *reader;
2249 unsigned length;
2250
2251 reader = rb_get_reader_page(cpu_buffer);
2252
2253 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002254 if (RB_WARN_ON(cpu_buffer, !reader))
2255 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002256
2257 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002258
Lai Jiangshan334d4162009-04-24 11:27:05 +08002259 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2260 || rb_discarded_event(event))
Steven Rostedte4906ef2009-04-30 20:49:44 -04002261 cpu_buffer->read++;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002262
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002263 rb_update_read_stamp(cpu_buffer, event);
2264
Steven Rostedtd7690412008-10-01 00:29:53 -04002265 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002266 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002267}
2268
2269static void rb_advance_iter(struct ring_buffer_iter *iter)
2270{
2271 struct ring_buffer *buffer;
2272 struct ring_buffer_per_cpu *cpu_buffer;
2273 struct ring_buffer_event *event;
2274 unsigned length;
2275
2276 cpu_buffer = iter->cpu_buffer;
2277 buffer = cpu_buffer->buffer;
2278
2279 /*
2280 * Check if we are at the end of the buffer.
2281 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002282 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedtea05b572009-06-03 09:30:10 -04002283 /* discarded commits can make the page empty */
2284 if (iter->head_page == cpu_buffer->commit_page)
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002285 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002286 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002287 return;
2288 }
2289
2290 event = rb_iter_head_event(iter);
2291
2292 length = rb_event_length(event);
2293
2294 /*
2295 * This should not be called to advance the header if we are
2296 * at the tail of the buffer.
2297 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002298 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05002299 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002300 (iter->head + length > rb_commit_index(cpu_buffer))))
2301 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002302
2303 rb_update_iter_read_stamp(iter, event);
2304
2305 iter->head += length;
2306
2307 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002308 if ((iter->head >= rb_page_size(iter->head_page)) &&
2309 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002310 rb_advance_iter(iter);
2311}
2312
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002313static struct ring_buffer_event *
2314rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002315{
2316 struct ring_buffer_per_cpu *cpu_buffer;
2317 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04002318 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002319 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002320
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002321 cpu_buffer = buffer->buffers[cpu];
2322
2323 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002324 /*
2325 * We repeat when a timestamp is encountered. It is possible
2326 * to get multiple timestamps from an interrupt entering just
Steven Rostedtea05b572009-06-03 09:30:10 -04002327 * as one timestamp is about to be written, or from discarded
2328 * commits. The most that we can have is the number on a single page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002329 */
Steven Rostedtea05b572009-06-03 09:30:10 -04002330 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002331 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002332
Steven Rostedtd7690412008-10-01 00:29:53 -04002333 reader = rb_get_reader_page(cpu_buffer);
2334 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002335 return NULL;
2336
Steven Rostedtd7690412008-10-01 00:29:53 -04002337 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002338
Lai Jiangshan334d4162009-04-24 11:27:05 +08002339 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002340 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002341 if (rb_null_event(event))
2342 RB_WARN_ON(cpu_buffer, 1);
2343 /*
2344 * Because the writer could be discarding every
2345 * event it creates (which would probably be bad)
2346 * if we were to go back to "again" then we may never
2347 * catch up, and will trigger the warn on, or lock
2348 * the box. Return the padding, and we will release
2349 * the current locks, and try again.
2350 */
Steven Rostedtd7690412008-10-01 00:29:53 -04002351 rb_advance_reader(cpu_buffer);
Tom Zanussi2d622712009-03-22 03:30:49 -05002352 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002353
2354 case RINGBUF_TYPE_TIME_EXTEND:
2355 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04002356 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002357 goto again;
2358
2359 case RINGBUF_TYPE_TIME_STAMP:
2360 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04002361 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002362 goto again;
2363
2364 case RINGBUF_TYPE_DATA:
2365 if (ts) {
2366 *ts = cpu_buffer->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002367 ring_buffer_normalize_time_stamp(buffer,
2368 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002369 }
2370 return event;
2371
2372 default:
2373 BUG();
2374 }
2375
2376 return NULL;
2377}
Robert Richterc4f50182008-12-11 16:49:22 +01002378EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002379
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002380static struct ring_buffer_event *
2381rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002382{
2383 struct ring_buffer *buffer;
2384 struct ring_buffer_per_cpu *cpu_buffer;
2385 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002386 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002387
2388 if (ring_buffer_iter_empty(iter))
2389 return NULL;
2390
2391 cpu_buffer = iter->cpu_buffer;
2392 buffer = cpu_buffer->buffer;
2393
2394 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002395 /*
Steven Rostedtea05b572009-06-03 09:30:10 -04002396 * We repeat when a timestamp is encountered.
2397 * We can get multiple timestamps by nested interrupts or also
2398 * if filtering is on (discarding commits). Since discarding
2399 * commits can be frequent we can get a lot of timestamps.
2400 * But we limit them by not adding timestamps if they begin
2401 * at the start of a page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002402 */
Steven Rostedtea05b572009-06-03 09:30:10 -04002403 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002404 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002405
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002406 if (rb_per_cpu_empty(cpu_buffer))
2407 return NULL;
2408
2409 event = rb_iter_head_event(iter);
2410
Lai Jiangshan334d4162009-04-24 11:27:05 +08002411 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002412 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002413 if (rb_null_event(event)) {
2414 rb_inc_iter(iter);
2415 goto again;
2416 }
2417 rb_advance_iter(iter);
2418 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002419
2420 case RINGBUF_TYPE_TIME_EXTEND:
2421 /* Internal data, OK to advance */
2422 rb_advance_iter(iter);
2423 goto again;
2424
2425 case RINGBUF_TYPE_TIME_STAMP:
2426 /* FIXME: not implemented */
2427 rb_advance_iter(iter);
2428 goto again;
2429
2430 case RINGBUF_TYPE_DATA:
2431 if (ts) {
2432 *ts = iter->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002433 ring_buffer_normalize_time_stamp(buffer,
2434 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002435 }
2436 return event;
2437
2438 default:
2439 BUG();
2440 }
2441
2442 return NULL;
2443}
Robert Richterc4f50182008-12-11 16:49:22 +01002444EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002445
2446/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002447 * ring_buffer_peek - peek at the next event to be read
2448 * @buffer: The ring buffer to read
 2449 * @cpu: The cpu to peek at
2450 * @ts: The timestamp counter of this event.
2451 *
2452 * This will return the event that will be read next, but does
2453 * not consume the data.
2454 */
2455struct ring_buffer_event *
2456ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2457{
2458 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8aabee52009-03-12 13:13:49 -04002459 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002460 unsigned long flags;
2461
Steven Rostedt554f7862009-03-11 22:00:13 -04002462 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002463 return NULL;
Steven Rostedt554f7862009-03-11 22:00:13 -04002464
Tom Zanussi2d622712009-03-22 03:30:49 -05002465 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002466 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2467 event = rb_buffer_peek(buffer, cpu, ts);
2468 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2469
Lai Jiangshan334d4162009-04-24 11:27:05 +08002470 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002471 cpu_relax();
2472 goto again;
2473 }
2474
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002475 return event;
2476}
2477
2478/**
2479 * ring_buffer_iter_peek - peek at the next event to be read
2480 * @iter: The ring buffer iterator
2481 * @ts: The timestamp counter of this event.
2482 *
2483 * This will return the event that will be read next, but does
2484 * not increment the iterator.
2485 */
2486struct ring_buffer_event *
2487ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2488{
2489 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2490 struct ring_buffer_event *event;
2491 unsigned long flags;
2492
Tom Zanussi2d622712009-03-22 03:30:49 -05002493 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002494 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2495 event = rb_iter_peek(iter, ts);
2496 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2497
Lai Jiangshan334d4162009-04-24 11:27:05 +08002498 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002499 cpu_relax();
2500 goto again;
2501 }
2502
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002503 return event;
2504}
2505
2506/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002507 * ring_buffer_consume - return an event and consume it
 2508 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp
 2509 *
 2510 * Returns the next event in the ring buffer, and that event is consumed.
 2511 * Meaning that sequential reads will keep returning a different event,
2512 * and eventually empty the ring buffer if the producer is slower.
2513 */
2514struct ring_buffer_event *
2515ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2516{
Steven Rostedt554f7862009-03-11 22:00:13 -04002517 struct ring_buffer_per_cpu *cpu_buffer;
2518 struct ring_buffer_event *event = NULL;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002519 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002520
Tom Zanussi2d622712009-03-22 03:30:49 -05002521 again:
Steven Rostedt554f7862009-03-11 22:00:13 -04002522 /* might be called in atomic */
2523 preempt_disable();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002524
Steven Rostedt554f7862009-03-11 22:00:13 -04002525 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2526 goto out;
2527
2528 cpu_buffer = buffer->buffers[cpu];
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002529 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002530
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002531 event = rb_buffer_peek(buffer, cpu, ts);
2532 if (!event)
Steven Rostedt554f7862009-03-11 22:00:13 -04002533 goto out_unlock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002534
Steven Rostedtd7690412008-10-01 00:29:53 -04002535 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002536
Steven Rostedt554f7862009-03-11 22:00:13 -04002537 out_unlock:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002538 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2539
Steven Rostedt554f7862009-03-11 22:00:13 -04002540 out:
2541 preempt_enable();
2542
Lai Jiangshan334d4162009-04-24 11:27:05 +08002543 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002544 cpu_relax();
2545 goto again;
2546 }
2547
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002548 return event;
2549}
Robert Richterc4f50182008-12-11 16:49:22 +01002550EXPORT_SYMBOL_GPL(ring_buffer_consume);
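
/*
 * A typical consumer loop (a hedged sketch): drain one cpu buffer
 * until it is empty, handling each event's payload via
 * ring_buffer_event_data().
 */
#if 0	/* illustration only */
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		u64 *body = ring_buffer_event_data(event);
		/* ... process *body at timestamp ts ... */
	}
#endif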
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002551
2552/**
2553 * ring_buffer_read_start - start a non consuming read of the buffer
2554 * @buffer: The ring buffer to read from
2555 * @cpu: The cpu buffer to iterate over
2556 *
2557 * This starts up an iteration through the buffer. It also disables
2558 * the recording to the buffer until the reading is finished.
2559 * This prevents the reading from being corrupted. This is not
2560 * a consuming read, so a producer is not expected.
2561 *
 2562 * Must be paired with ring_buffer_read_finish.
2563 */
2564struct ring_buffer_iter *
2565ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2566{
2567 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002568 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002569 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002570
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302571 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002572 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002573
2574 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2575 if (!iter)
Steven Rostedt8aabee52009-03-12 13:13:49 -04002576 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002577
2578 cpu_buffer = buffer->buffers[cpu];
2579
2580 iter->cpu_buffer = cpu_buffer;
2581
2582 atomic_inc(&cpu_buffer->record_disabled);
2583 synchronize_sched();
2584
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002585 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002586 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002587 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002588 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002589 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002590
2591 return iter;
2592}
Robert Richterc4f50182008-12-11 16:49:22 +01002593EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002594
2595/**
 2596 * ring_buffer_read_finish - finish reading the iterator of the buffer
 2597 * @iter: The iterator retrieved by ring_buffer_read_start
2598 *
2599 * This re-enables the recording to the buffer, and frees the
2600 * iterator.
2601 */
2602void
2603ring_buffer_read_finish(struct ring_buffer_iter *iter)
2604{
2605 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2606
2607 atomic_dec(&cpu_buffer->record_disabled);
2608 kfree(iter);
2609}
Robert Richterc4f50182008-12-11 16:49:22 +01002610EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002611
2612/**
2613 * ring_buffer_read - read the next item in the ring buffer by the iterator
2614 * @iter: The ring buffer iterator
2615 * @ts: The time stamp of the event read.
2616 *
2617 * This reads the next event in the ring buffer and increments the iterator.
2618 */
2619struct ring_buffer_event *
2620ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2621{
2622 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002623 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2624 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002625
Tom Zanussi2d622712009-03-22 03:30:49 -05002626 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002627 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2628 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002629 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002630 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002631
2632 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002633 out:
2634 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002635
Lai Jiangshan334d4162009-04-24 11:27:05 +08002636 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002637 cpu_relax();
2638 goto again;
2639 }
2640
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002641 return event;
2642}
Robert Richterc4f50182008-12-11 16:49:22 +01002643EXPORT_SYMBOL_GPL(ring_buffer_read);
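
/*
 * Putting the iterator API together (a hedged sketch): start a
 * non-consuming read, walk every event with ring_buffer_read(), and
 * release the buffer again with ring_buffer_read_finish().
 */
#if 0	/* illustration only */
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (iter) {
		while ((event = ring_buffer_read(iter, &ts)))
			/* ... inspect the event; nothing is consumed ... */;
		ring_buffer_read_finish(iter);
	}
#endif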
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002644
2645/**
2646 * ring_buffer_size - return the size of the ring buffer (in bytes)
2647 * @buffer: The ring buffer.
2648 */
2649unsigned long ring_buffer_size(struct ring_buffer *buffer)
2650{
2651 return BUF_PAGE_SIZE * buffer->pages;
2652}
Robert Richterc4f50182008-12-11 16:49:22 +01002653EXPORT_SYMBOL_GPL(ring_buffer_size);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002654
2655static void
2656rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2657{
2658 cpu_buffer->head_page
2659 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002660 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002661 local_set(&cpu_buffer->head_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002662 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002663
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002664 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002665
2666 cpu_buffer->tail_page = cpu_buffer->head_page;
2667 cpu_buffer->commit_page = cpu_buffer->head_page;
2668
2669 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2670 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002671 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002672 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002673 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002674
Steven Rostedtf0d2c682009-04-29 13:43:37 -04002675 cpu_buffer->nmi_dropped = 0;
2676 cpu_buffer->commit_overrun = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002677 cpu_buffer->overrun = 0;
Steven Rostedte4906ef2009-04-30 20:49:44 -04002678 cpu_buffer->read = 0;
2679 local_set(&cpu_buffer->entries, 0);
Steven Rostedt69507c02009-01-21 18:45:57 -05002680
2681 cpu_buffer->write_stamp = 0;
2682 cpu_buffer->read_stamp = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002683}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
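
/*
 * Sketch of a hypothetical caller: quiesce writers, discard the old
 * trace, then resume.  ring_buffer_record_disable()/enable() are the
 * documented way to stop writers around a reset.
 */
static void __maybe_unused example_restart_trace(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	ring_buffer_reset(buffer);	/* resets every per-cpu buffer */
	ring_buffer_record_enable(buffer);
}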

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
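
/*
 * Sketch: a busy-wait poll on the racy emptiness check above.  Real
 * callers should sleep or lock the buffer if the race matters.
 */
static void __maybe_unused example_poll_for_data(struct ring_buffer *buffer)
{
	while (ring_buffer_empty(buffer))
		cpu_relax();
}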

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	ret = rb_per_cpu_empty(cpu_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
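
/*
 * Sketch: counting the per-cpu buffers that still hold data, subject
 * to the same benign race as ring_buffer_empty().
 */
static int __maybe_unused example_count_busy_cpus(struct ring_buffer *buffer)
{
	int cpu, busy = 0;

	for_each_online_cpu(cpu)
		if (!ring_buffer_empty_cpu(buffer, cpu))
			busy++;

	return busy;
}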

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	ret = 0;
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
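
/*
 * Sketch of the "snapshot" pattern the kernel-doc above describes,
 * with a spare buffer of equal size.  The function name is
 * hypothetical and error handling is abbreviated.
 */
static void __maybe_unused example_snapshot_cpu(struct ring_buffer *live,
						struct ring_buffer *spare)
{
	int cpu = raw_smp_processor_id();

	if (ring_buffer_swap_cpu(live, spare, cpu))
		return;		/* -EINVAL or -EAGAIN: nothing was swapped */

	/* the old events are now readable from @spare ... */
}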

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process.  The calling function should
 * allocate a few pages first with this function.  Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
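
/*
 * Sketch: the expected pairing of the two helpers above.  The same
 * page may be handed to ring_buffer_read_page() any number of times
 * before it is freed.
 */
static void __maybe_unused example_page_lifetime(struct ring_buffer *buffer)
{
	void *page = ring_buffer_alloc_read_page(buffer);

	if (!page)
		return;

	/* ... hand @page to ring_buffer_read_page() here ... */

	ring_buffer_free_read_page(buffer, page);
}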

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page.  This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * For example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the read fails (returns <0) unless the writer
 * is completely off the reader page; a partially written or partially
 * read page is never handed out.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += local_read(&reader->entries);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
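
/*
 * Sketch: consuming a whole page at once.  @data_page is passed by
 * address because a successful swap replaces the caller's page with
 * the one taken from the ring buffer.
 */
static void __maybe_unused example_consume_page(struct ring_buffer *buffer,
						int cpu)
{
	void *page = ring_buffer_alloc_read_page(buffer);
	int ret;

	if (!page)
		return;

	/* @full == 1: only succeed once the writer has left the page */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1);
	if (ret >= 0)
		pr_debug("valid data starts at offset %d\n", ret);

	ring_buffer_free_read_page(buffer, page);
}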

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};


static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
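
/*
 * Userspace view of the control file created above (assuming debugfs
 * is mounted at /sys/kernel/debug and d_tracer resolves to the usual
 * "tracing" directory):
 *
 *	# cat /sys/kernel/debug/tracing/tracing_on
 *	1
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 */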

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpu_isset(cpu, *buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpu_set(cpu, *buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif