/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
        int ret;

        ret = trace_seq_printf(s, "# compressed entry header\n");
        ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
        ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
        ret = trace_seq_printf(s, "\tarray : 32 bits\n");
        ret = trace_seq_printf(s, "\n");
        ret = trace_seq_printf(s, "\tpadding : type == %d\n",
                               RINGBUF_TYPE_PADDING);
        ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
                               RINGBUF_TYPE_TIME_EXTEND);
        ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
                               RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

        return ret;
}
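
/*
 * Worked example of the compressed header (illustrative only; the exact
 * constants come from <linux/ring_buffer.h>, where the data maximum
 * RINGBUF_TYPE_DATA_TYPE_LEN_MAX is assumed to be 28):
 *
 *   A 12-byte data event stores type_len = 12 / RB_ALIGNMENT = 3 in the
 *   5-bit field, the delta in the 27-bit field, and its payload starts
 *   at event->array[0] (the header is only one 32-bit word).
 *
 *   A 200-byte data event exceeds RB_MAX_SMALL_DATA (4 * 28 = 112 bytes),
 *   so it stores type_len == 0, puts the byte count in array[0], and the
 *   payload starts at array[1] (the header grows to two words).
 *
 *   type_len values above the data maximum are reserved for the padding,
 *   time_extend and time_stamp records printed above.
 */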

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */
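
/*
 * Illustration of the three layers (assuming the usual control points for
 * each one): a write can only be recorded when
 *
 *   1) the global switch is on        - tracing_on() below sets it,
 *   2) the buffer itself is enabled   - e.g. ring_buffer_record_enable(),
 *   3) the per cpu buffer is enabled  - e.g. ring_buffer_record_enable_cpu().
 *
 * Clearing any one of the three (tracing_off(), ring_buffer_record_disable()
 * or ring_buffer_record_disable_cpu()) is enough to make writers bail out.
 */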

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0     : ring buffers are off
 *   1      0     : ring buffers are on
 *   X      1     : ring buffers are permanently disabled
 */

enum {
        RB_BUFFERS_ON_BIT       = 0,
        RB_BUFFERS_DISABLED_BIT = 1,
};

enum {
        RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

#include "trace.h"

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
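
/*
 * Concrete numbers for the macros above (illustrative, assuming the event
 * header layout in <linux/ring_buffer.h>): RB_EVNT_HDR_SIZE is 4 bytes
 * (array[] starts right after the 32-bit type_len/time_delta word),
 * RB_MAX_SMALL_DATA is 4 * 28 = 112 bytes of payload that can be encoded
 * directly in type_len, and RB_EVNT_MIN_SIZE is 8 bytes, the smallest
 * footprint any event can occupy on a page.
 */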

enum {
        RB_LEN_TIME_EXTEND = 8,
        RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
        return event->type_len == RINGBUF_TYPE_PADDING
                        && event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
        return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
        event->type_len = RINGBUF_TYPE_PADDING;
        event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len)
                length = event->type_len * RB_ALIGNMENT;
        else
                length = event->array[0];
        return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
                if (rb_null_event(event))
                        /* undefined */
                        return -1;
                return event->array[0] + RB_EVNT_HDR_SIZE;

        case RINGBUF_TYPE_TIME_EXTEND:
                return RB_LEN_TIME_EXTEND;

        case RINGBUF_TYPE_TIME_STAMP:
                return RB_LEN_TIME_STAMP;

        case RINGBUF_TYPE_DATA:
                return rb_event_data_length(event);
        default:
                BUG();
        }
        /* not hit */
        return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
        unsigned length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
                length -= sizeof(event->array[0]);
        return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
                return (void *)&event->array[0];
        /* Otherwise length is in array[0] and array[1] has the data */
        return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
        return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu) \
        for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST   (~TS_MASK)
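
/*
 * Worked example (illustrative): with TS_SHIFT of 27, TS_MASK covers
 * deltas up to 2^27 - 1 = 134217727 time units. A delta of 2^23 has no
 * bits set in TS_DELTA_TEST, so it fits in the event header; a delta of
 * 2^28 does, so test_time_stamp() (below) returns 1 and the writer must
 * emit a separate RINGBUF_TYPE_TIME_EXTEND event before the data event.
 */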

struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
        unsigned char    data[];        /* data of buffer page */
};

struct buffer_page {
        struct list_head list;          /* list of buffer pages */
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
        struct buffer_data_page *page;  /* Actual data page */
};
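
/*
 * Illustration of the three indexes (not a definition of the algorithm):
 * if a writer has reserved 96 bytes on a page but an interrupting writer
 * has not finished yet, the page can briefly have write == 96 while
 * page->commit == 64; readers only trust the committed 64 bytes. The
 * reader-side "read" index tracks how far a consumer has advanced within
 * the committed region, so the unread data on a page is commit - read.
 */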

static void rb_init_page(struct buffer_data_page *bpage)
{
        local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
        return local_read(&((struct buffer_data_page *)page)->commit)
                + BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
        free_page((unsigned long)bpage->page);
        kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
        if (delta & TS_DELTA_TEST)
                return 1;
        return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
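
/*
 * Worked numbers (illustrative, assuming 4K pages and an 8-byte local_t):
 * BUF_PAGE_HDR_SIZE is 16 (u64 time_stamp + local_t commit), so
 * BUF_PAGE_SIZE = 4096 - 16 = 4080 bytes of event space per page,
 * BUF_MAX_DATA_SIZE = 4080 - 8 = 4072 bytes for a single event's payload,
 * and RB_TIMESTAMPS_PER_PAGE = 4080 / 16 = 255. Architectures with other
 * page sizes or a 32-bit local_t get correspondingly different values.
 */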

int ring_buffer_print_page_header(struct trace_seq *s)
{
        struct buffer_data_page field;
        int ret;

        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
                               "offset:0;\tsize:%u;\n",
                               (unsigned int)sizeof(field.time_stamp));

        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
                               "offset:%u;\tsize:%u;\n",
                               (unsigned int)offsetof(typeof(field), commit),
                               (unsigned int)sizeof(field.commit));

        ret = trace_seq_printf(s, "\tfield: char data;\t"
                               "offset:%u;\tsize:%u;\n",
                               (unsigned int)offsetof(typeof(field), data),
                               (unsigned int)BUF_PAGE_SIZE);

        return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock;    /* serialize readers */
        raw_spinlock_t                  lock;
        struct lock_class_key           lock_key;
        struct list_head                pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   nmi_dropped;
        unsigned long                   commit_overrun;
        unsigned long                   overrun;
        unsigned long                   read;
        local_t                         entries;
        local_t                         committing;
        local_t                         commits;
        u64                             write_stamp;
        u64                             read_stamp;
        atomic_t                        record_disabled;
};

struct ring_buffer {
        unsigned                        pages;
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
        cpumask_var_t                   cpumask;

        struct lock_class_key           *reader_lock_key;

        struct mutex                    mutex;

        struct ring_buffer_per_cpu      **buffers;

#ifdef CONFIG_HOTPLUG_CPU
        struct notifier_block           cpu_notify;
#endif
        u64                             (*clock)(void);
};

struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        struct buffer_page              *head_page;
        u64                             read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)                                \
        ({                                                      \
                int _____ret = unlikely(cond);                  \
                if (_____ret) {                                 \
                        atomic_inc(&buffer->record_disabled);   \
                        WARN_ON(1);                             \
                }                                               \
                _____ret;                                       \
        })

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
{
        /* shift to debug/test normalization and TIME_EXTENTS */
        return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
        u64 time;

        preempt_disable_notrace();
        time = rb_time_stamp(buffer, cpu);
        preempt_enable_no_resched_notrace();

        return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
                                      int cpu, u64 *ts)
{
        /* Just stupid testing the normalize function and deltas */
        *ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data
 * pages have not been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
                return -1;
        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
                return -1;

        list_for_each_entry_safe(bpage, tmp, head, list) {
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.next->prev != &bpage->list))
                        return -1;
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.prev->next != &bpage->list))
                        return -1;
        }

        return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned nr_pages)
{
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
        unsigned long addr;
        LIST_HEAD(pages);
        unsigned i;

        for (i = 0; i < nr_pages; i++) {
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
                if (!bpage)
                        goto free_pages;
                list_add(&bpage->list, &pages);

                addr = __get_free_page(GFP_KERNEL);
                if (!addr)
                        goto free_pages;
                bpage->page = (void *)addr;
                rb_init_page(bpage->page);
        }

        list_splice(&pages, head);

        rb_check_pages(cpu_buffer);

        return 0;

 free_pages:
        list_for_each_entry_safe(bpage, tmp, &pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
        unsigned long addr;
        int ret;

        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
                                  GFP_KERNEL, cpu_to_node(cpu));
        if (!cpu_buffer)
                return NULL;

        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
        cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&cpu_buffer->pages);

        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                             GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
                goto fail_free_buffer;

        cpu_buffer->reader_page = bpage;
        addr = __get_free_page(GFP_KERNEL);
        if (!addr)
                goto fail_free_reader;
        bpage->page = (void *)addr;
        rb_init_page(bpage->page);

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

        ret = rb_allocate_pages(cpu_buffer, buffer->pages);
        if (ret < 0)
                goto fail_free_reader;

        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

        return cpu_buffer;

 fail_free_reader:
        free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
        kfree(cpu_buffer);
        return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        free_buffer_page(cpu_buffer->reader_page);

        list_for_each_entry_safe(bpage, tmp, head, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
{
        struct ring_buffer *buffer;
        int bsize;
        int cpu;

        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
                         GFP_KERNEL);
        if (!buffer)
                return NULL;

        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                goto fail_free_buffer;

        buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        buffer->flags = flags;
        buffer->clock = trace_clock_local;
        buffer->reader_lock_key = key;

        /* need at least two pages */
        if (buffer->pages < 2)
                buffer->pages = 2;

        /*
         * In case of non-hotplug cpu, if the ring-buffer is allocated
         * in early initcall, it will not be notified of secondary cpus.
         * In that case, we need to allocate for all possible cpus.
         */
#ifdef CONFIG_HOTPLUG_CPU
        get_online_cpus();
        cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
        cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
        buffer->cpus = nr_cpu_ids;

        bsize = sizeof(void *) * nr_cpu_ids;
        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
                                  GFP_KERNEL);
        if (!buffer->buffers)
                goto fail_free_cpumask;

        for_each_buffer_cpu(buffer, cpu) {
                buffer->buffers[cpu] =
                        rb_allocate_cpu_buffer(buffer, cpu);
                if (!buffer->buffers[cpu])
                        goto fail_free_buffers;
        }

#ifdef CONFIG_HOTPLUG_CPU
        buffer->cpu_notify.notifier_call = rb_cpu_notify;
        buffer->cpu_notify.priority = 0;
        register_cpu_notifier(&buffer->cpu_notify);
#endif

        put_online_cpus();
        mutex_init(&buffer->mutex);

        return buffer;

 fail_free_buffers:
        for_each_buffer_cpu(buffer, cpu) {
                if (buffer->buffers[cpu])
                        rb_free_cpu_buffer(buffer->buffers[cpu]);
        }
        kfree(buffer->buffers);

 fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
        put_online_cpus();

 fail_free_buffer:
        kfree(buffer);
        return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
        int cpu;

        get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&buffer->cpu_notify);
#endif

        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);

        put_online_cpus();

        free_cpumask_var(buffer->cpumask);

        kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
                           u64 (*clock)(void))
{
        buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
        struct buffer_page *bpage;
        struct list_head *p;
        unsigned i;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
                        return;
                p = cpu_buffer->pages.next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
                return;

        rb_reset_cpu(cpu_buffer);

        rb_check_pages(cpu_buffer);

        atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
                struct list_head *pages, unsigned nr_pages)
{
        struct buffer_page *bpage;
        struct list_head *p;
        unsigned i;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
                        return;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                list_add_tail(&bpage->list, &cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);

        rb_check_pages(cpu_buffer);

        atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned nr_pages, rm_pages, new_pages;
        struct buffer_page *bpage, *tmp;
        unsigned long buffer_size;
        unsigned long addr;
        LIST_HEAD(pages);
        int i, cpu;

        /*
         * Always succeed at resizing a non-existent buffer:
         */
        if (!buffer)
                return size;

        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        size *= BUF_PAGE_SIZE;
        buffer_size = buffer->pages * BUF_PAGE_SIZE;

        /* we need a minimum of two pages */
        if (size < BUF_PAGE_SIZE * 2)
                size = BUF_PAGE_SIZE * 2;

        if (size == buffer_size)
                return size;

        mutex_lock(&buffer->mutex);
        get_online_cpus();

        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

        if (size < buffer_size) {

                /* easy case, just free pages */
                if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
                        goto out_fail;

                rm_pages = buffer->pages - nr_pages;

                for_each_buffer_cpu(buffer, cpu) {
                        cpu_buffer = buffer->buffers[cpu];
                        rb_remove_pages(cpu_buffer, rm_pages);
                }
                goto out;
        }

        /*
         * This is a bit more difficult. We only want to add pages
         * when we can allocate enough for all CPUs. We do this
         * by allocating all the pages and storing them on a local
         * linked list. If we succeed in our allocation, then we
         * add these pages to the cpu_buffers. Otherwise we just free
         * them all and return -ENOMEM;
         */
        if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
                goto out_fail;

        new_pages = nr_pages - buffer->pages;

        for_each_buffer_cpu(buffer, cpu) {
                for (i = 0; i < new_pages; i++) {
                        bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                                                   cache_line_size()),
                                             GFP_KERNEL, cpu_to_node(cpu));
                        if (!bpage)
                                goto free_pages;
                        list_add(&bpage->list, &pages);
                        addr = __get_free_page(GFP_KERNEL);
                        if (!addr)
                                goto free_pages;
                        bpage->page = (void *)addr;
                        rb_init_page(bpage->page);
                }
        }

        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                rb_insert_pages(cpu_buffer, &pages, new_pages);
        }

        if (RB_WARN_ON(buffer, !list_empty(&pages)))
                goto out_fail;

 out:
        buffer->pages = nr_pages;
        put_online_cpus();
        mutex_unlock(&buffer->mutex);

        return size;

 free_pages:
        list_for_each_entry_safe(bpage, tmp, &pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
        return -ENOMEM;

        /*
         * Something went totally wrong, and we are too paranoid
         * to even clean up the mess.
         */
 out_fail:
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
        return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
        return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
        return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
        return __rb_page_index(cpu_buffer->reader_page,
                               cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
        return __rb_page_index(cpu_buffer->head_page,
                               cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
        return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
        return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
        return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
        return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
        return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
        return rb_page_commit(cpu_buffer->head_page);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page **bpage)
{
        struct list_head *p = (*bpage)->list.next;

        if (p == &cpu_buffer->pages)
                p = p->next;

        *bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
        unsigned long addr = (unsigned long)event;

        return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
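
/*
 * Example of the index arithmetic (illustrative, assuming 4K pages and a
 * 16-byte buffer_data_page header): an event living at a kernel address
 * whose low bits are 0x230 sits at offset 0x230 within its page, so
 * rb_event_index() returns 0x230 - BUF_PAGE_HDR_SIZE = 0x220, the event's
 * offset into that page's data[] area.
 */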

static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
                   struct ring_buffer_event *event)
{
        unsigned long addr = (unsigned long)event;
        unsigned long index;

        index = rb_event_index(event);
        addr &= PAGE_MASK;

        return cpu_buffer->commit_page->page == (void *)addr &&
                rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
        /*
         * We only race with interrupts and NMIs on this CPU.
         * If we own the commit event, then we can commit
         * all others that interrupted us, since the interruptions
         * are in stack format (they finish before they come
         * back to us). This allows us to do a simple loop to
         * assign the commit to the tail.
         */
 again:
        while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
                cpu_buffer->commit_page->page->commit =
                        cpu_buffer->commit_page->write;
                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
                cpu_buffer->write_stamp =
                        cpu_buffer->commit_page->page->time_stamp;
                /* add barrier to keep gcc from optimizing too much */
                barrier();
        }
        while (rb_commit_index(cpu_buffer) !=
               rb_page_write(cpu_buffer->commit_page)) {
                cpu_buffer->commit_page->page->commit =
                        cpu_buffer->commit_page->write;
                barrier();
        }

        /* again, keep gcc from optimizing */
        barrier();

        /*
         * If an interrupt came in just after the first while loop
         * and pushed the tail page forward, we will be left with
         * a dangling commit that will never go forward.
         */
        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
                goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
        cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

        /*
         * The iterator could be on the reader page (it starts there).
         * But the head could have moved, since the reader was
         * found. Check for this case and assign the iterator
         * to the head page instead of next.
         */
        if (iter->head_page == cpu_buffer->reader_page)
                iter->head_page = cpu_buffer->head_page;
        else
                rb_inc_page(cpu_buffer, &iter->head_page);

        iter->read_stamp = iter->head_page->page->time_stamp;
        iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
                unsigned type, unsigned length)
{
        event->type_len = type;

        switch (type) {

        case RINGBUF_TYPE_PADDING:
        case RINGBUF_TYPE_TIME_EXTEND:
        case RINGBUF_TYPE_TIME_STAMP:
                break;

        case 0:
                length -= RB_EVNT_HDR_SIZE;
                if (length > RB_MAX_SMALL_DATA)
                        event->array[0] = length;
                else
                        event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
                break;
        default:
                BUG();
        }
}

static unsigned rb_calculate_event_length(unsigned length)
{
        struct ring_buffer_event event; /* Used only for sizeof array */

        /* zero length can cause confusion */
        if (!length)
                length = 1;

        if (length > RB_MAX_SMALL_DATA)
                length += sizeof(event.array[0]);

        length += RB_EVNT_HDR_SIZE;
        length = ALIGN(length, RB_ALIGNMENT);

        return length;
}
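
/*
 * Worked example (illustrative): a request to reserve 3 bytes of payload
 * becomes 3 + RB_EVNT_HDR_SIZE = 7, rounded up to 8 bytes on the page.
 * A 200-byte request exceeds RB_MAX_SMALL_DATA, so it also reserves the
 * extra length word: 200 + 4 + 4 = 208 bytes, already 4-byte aligned.
 */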

static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
              struct buffer_page *tail_page,
              unsigned long tail, unsigned long length)
{
        struct ring_buffer_event *event;

        /*
         * Only the event that crossed the page boundary
         * must fill the old tail_page with padding.
         */
        if (tail >= BUF_PAGE_SIZE) {
                local_sub(length, &tail_page->write);
                return;
        }

        event = __rb_page_index(tail_page, tail);
        kmemcheck_annotate_bitfield(event, bitfield);

        /*
         * If this event is bigger than the minimum size, then
         * we need to be careful that we don't subtract the
         * write counter enough to allow another writer to slip
         * in on this page.
         * We put in a discarded commit instead, to make sure
         * that this space is not used again.
         *
         * If we are less than the minimum size, we don't need to
         * worry about it.
         */
        if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
                /* No room for any events */

                /* Mark the rest of the page with padding */
                rb_event_set_padding(event);

                /* Set the write back to the previous setting */
                local_sub(length, &tail_page->write);
                return;
        }

        /* Put in a discarded event */
        event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
        event->type_len = RINGBUF_TYPE_PADDING;
        /* time delta must be non zero */
        event->time_delta = 1;
        /* Account for this as an entry */
        local_inc(&tail_page->entries);
        local_inc(&cpu_buffer->entries);

        /* Set write to end of buffer */
        length = (tail + length) - BUF_PAGE_SIZE;
        local_sub(length, &tail_page->write);
}

static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
             unsigned long length, unsigned long tail,
             struct buffer_page *commit_page,
             struct buffer_page *tail_page, u64 *ts)
{
        struct buffer_page *next_page, *head_page, *reader_page;
        struct ring_buffer *buffer = cpu_buffer->buffer;
        bool lock_taken = false;
        unsigned long flags;

        next_page = tail_page;

        local_irq_save(flags);
        /*
         * Since the write to the buffer is still not
         * fully lockless, we must be careful with NMIs.
         * The locks in the writers are taken when a write
         * crosses to a new page. The locks protect against
         * races with the readers (this will soon be fixed
         * with a lockless solution).
         *
         * Because we can not protect against NMIs, and we
         * want to keep traces reentrant, we need to manage
         * what happens when we are in an NMI.
         *
         * NMIs can happen after we take the lock.
         * If we are in an NMI, only take the lock
         * if it is not already taken. Otherwise
         * simply fail.
         */
        if (unlikely(in_nmi())) {
                if (!__raw_spin_trylock(&cpu_buffer->lock)) {
                        cpu_buffer->nmi_dropped++;
                        goto out_reset;
                }
        } else
                __raw_spin_lock(&cpu_buffer->lock);

        lock_taken = true;

        rb_inc_page(cpu_buffer, &next_page);

        head_page = cpu_buffer->head_page;
        reader_page = cpu_buffer->reader_page;

        /* we grabbed the lock before incrementing */
        if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
                goto out_reset;

        /*
         * If for some reason, we had an interrupt storm that made
         * it all the way around the buffer, bail, and warn
         * about it.
         */
        if (unlikely(next_page == commit_page)) {
                cpu_buffer->commit_overrun++;
                goto out_reset;
        }

        if (next_page == head_page) {
                if (!(buffer->flags & RB_FL_OVERWRITE))
                        goto out_reset;

                /* tail_page has not moved yet? */
                if (tail_page == cpu_buffer->tail_page) {
                        /* count overflows */
                        cpu_buffer->overrun +=
                                local_read(&head_page->entries);

                        rb_inc_page(cpu_buffer, &head_page);
                        cpu_buffer->head_page = head_page;
                        cpu_buffer->head_page->read = 0;
                }
        }

        /*
         * If the tail page is still the same as what we think
         * it is, then it is up to us to update the tail
         * pointer.
         */
        if (tail_page == cpu_buffer->tail_page) {
                local_set(&next_page->write, 0);
                local_set(&next_page->entries, 0);
                local_set(&next_page->page->commit, 0);
                cpu_buffer->tail_page = next_page;

                /* reread the time stamp */
                *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
                cpu_buffer->tail_page->page->time_stamp = *ts;
        }

        rb_reset_tail(cpu_buffer, tail_page, tail, length);

        __raw_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);

        /* fail and let the caller try again */
        return ERR_PTR(-EAGAIN);

 out_reset:
        /* reset write */
        rb_reset_tail(cpu_buffer, tail_page, tail, length);

        if (likely(lock_taken))
                __raw_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
        return NULL;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                  unsigned type, unsigned long length, u64 *ts)
{
        struct buffer_page *tail_page, *commit_page;
        struct ring_buffer_event *event;
        unsigned long tail, write;

        commit_page = cpu_buffer->commit_page;
        /* we just need to protect against interrupts */
        barrier();
        tail_page = cpu_buffer->tail_page;
        write = local_add_return(length, &tail_page->write);
        tail = write - length;

        /* See if we shot past the end of this buffer page */
        if (write > BUF_PAGE_SIZE)
                return rb_move_tail(cpu_buffer, length, tail,
                                    commit_page, tail_page, ts);

        /* We reserved something on the buffer */

        event = __rb_page_index(tail_page, tail);
        kmemcheck_annotate_bitfield(event, bitfield);
        rb_update_event(event, type, length);

        /* The passed in type is zero for DATA */
        if (likely(!type))
                local_inc(&tail_page->entries);

        /*
         * If this is the first commit on the page, then update
         * its timestamp.
         */
        if (!tail)
                tail_page->page->time_stamp = *ts;

        return event;
}

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
                  struct ring_buffer_event *event)
{
        unsigned long new_index, old_index;
        struct buffer_page *bpage;
        unsigned long index;
        unsigned long addr;

        new_index = rb_event_index(event);
        old_index = new_index + rb_event_length(event);
        addr = (unsigned long)event;
        addr &= PAGE_MASK;

        bpage = cpu_buffer->tail_page;

        if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
                /*
                 * This is on the tail page. It is possible that
                 * a write could come in and move the tail page
                 * and write to the next page. That is fine
                 * because we just shorten what is on this page.
                 */
                index = local_cmpxchg(&bpage->write, old_index, new_index);
                if (index == old_index)
                        return 1;
        }

        /* could not discard */
        return 0;
}
1377
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001378static int
1379rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1380 u64 *ts, u64 *delta)
1381{
1382 struct ring_buffer_event *event;
1383 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001384 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001385
1386 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1387 printk(KERN_WARNING "Delta way too big! %llu"
1388 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +11001389 (unsigned long long)*delta,
1390 (unsigned long long)*ts,
1391 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001392 WARN_ON(1);
1393 }
1394
1395 /*
1396	 * The delta is too big, we need to add a
1397	 * new timestamp.
1398 */
1399 event = __rb_reserve_next(cpu_buffer,
1400 RINGBUF_TYPE_TIME_EXTEND,
1401 RB_LEN_TIME_EXTEND,
1402 ts);
1403 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001404 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001405
Steven Rostedtbf41a152008-10-04 02:00:59 -04001406 if (PTR_ERR(event) == -EAGAIN)
1407 return -EAGAIN;
1408
1409	/* Only a committed time event can update the write stamp */
Steven Rostedtfa743952009-06-16 12:37:57 -04001410 if (rb_event_is_commit(cpu_buffer, event)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001411 /*
Steven Rostedtfa743952009-06-16 12:37:57 -04001412		 * If this is the first event on the page, then its
1413		 * timestamp was already written with the page itself.
1414		 * Try to discard the event, and if we can't, just zero it.
Steven Rostedtbf41a152008-10-04 02:00:59 -04001415 */
1416 if (rb_event_index(event)) {
1417 event->time_delta = *delta & TS_MASK;
1418 event->array[0] = *delta >> TS_SHIFT;
1419 } else {
Steven Rostedtea05b572009-06-03 09:30:10 -04001420 /* try to discard, since we do not need this */
1421 if (!rb_try_to_discard(cpu_buffer, event)) {
1422 /* nope, just zero it */
1423 event->time_delta = 0;
1424 event->array[0] = 0;
1425 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001426 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001427 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001428 /* let the caller know this was the commit */
1429 ret = 1;
1430 } else {
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001431 /* Try to discard the event */
1432 if (!rb_try_to_discard(cpu_buffer, event)) {
1433 /* Darn, this is just wasted space */
1434 event->time_delta = 0;
1435 event->array[0] = 0;
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001436 }
Steven Rostedtf57a8a12009-06-05 14:11:30 -04001437 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001438 }
1439
Steven Rostedtbf41a152008-10-04 02:00:59 -04001440 *delta = 0;
1441
1442 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001443}
1444
Steven Rostedtfa743952009-06-16 12:37:57 -04001445static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
1446{
1447 local_inc(&cpu_buffer->committing);
1448 local_inc(&cpu_buffer->commits);
1449}
1450
1451static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
1452{
1453 unsigned long commits;
1454
1455 if (RB_WARN_ON(cpu_buffer,
1456 !local_read(&cpu_buffer->committing)))
1457 return;
1458
1459 again:
1460 commits = local_read(&cpu_buffer->commits);
1461 /* synchronize with interrupts */
1462 barrier();
1463 if (local_read(&cpu_buffer->committing) == 1)
1464 rb_set_commit_to_write(cpu_buffer);
1465
1466 local_dec(&cpu_buffer->committing);
1467
1468 /* synchronize with interrupts */
1469 barrier();
1470
1471 /*
1472 * Need to account for interrupts coming in between the
1473 * updating of the commit page and the clearing of the
1474 * committing counter.
1475 */
1476 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
1477 !local_read(&cpu_buffer->committing)) {
1478 local_inc(&cpu_buffer->committing);
1479 goto again;
1480 }
1481}
1482
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001483static struct ring_buffer_event *
1484rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001485 unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001486{
1487 struct ring_buffer_event *event;
Steven Rostedt168b6b12009-05-11 22:11:05 -04001488 u64 ts, delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001489 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001490 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001491
Steven Rostedtfa743952009-06-16 12:37:57 -04001492 rb_start_commit(cpu_buffer);
1493
Steven Rostedtbe957c42009-05-11 14:42:53 -04001494 length = rb_calculate_event_length(length);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001495 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001496 /*
1497 * We allow for interrupts to reenter here and do a trace.
1498 * If one does, it will cause this original code to loop
1499 * back here. Even with heavy interrupts happening, this
1500 * should only happen a few times in a row. If this happens
1501 * 1000 times in a row, there must be either an interrupt
1502 * storm or we have something buggy.
1503 * Bail!
1504 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001505 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedtfa743952009-06-16 12:37:57 -04001506 goto out_fail;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001507
Steven Rostedt88eb0122009-05-11 16:28:23 -04001508 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001509
Steven Rostedtbf41a152008-10-04 02:00:59 -04001510 /*
1511 * Only the first commit can update the timestamp.
1512 * Yes there is a race here. If an interrupt comes in
1513 * just after the conditional and it traces too, then it
1514 * will also check the deltas. More than one timestamp may
1515 * also be made. But only the entry that did the actual
1516 * commit will be something other than zero.
1517 */
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04001518 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1519 rb_page_write(cpu_buffer->tail_page) ==
1520 rb_commit_index(cpu_buffer))) {
Steven Rostedt168b6b12009-05-11 22:11:05 -04001521 u64 diff;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001522
Steven Rostedt168b6b12009-05-11 22:11:05 -04001523 diff = ts - cpu_buffer->write_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001524
Steven Rostedt168b6b12009-05-11 22:11:05 -04001525 /* make sure this diff is calculated here */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001526 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001527
Steven Rostedtbf41a152008-10-04 02:00:59 -04001528 /* Did the write stamp get updated already? */
1529 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt168b6b12009-05-11 22:11:05 -04001530 goto get_event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001531
Steven Rostedt168b6b12009-05-11 22:11:05 -04001532 delta = diff;
1533 if (unlikely(test_time_stamp(delta))) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001534
1535 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001536 if (commit == -EBUSY)
Steven Rostedtfa743952009-06-16 12:37:57 -04001537 goto out_fail;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001538
1539 if (commit == -EAGAIN)
1540 goto again;
1541
1542 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001543 }
Steven Rostedt168b6b12009-05-11 22:11:05 -04001544 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001545
Steven Rostedt168b6b12009-05-11 22:11:05 -04001546 get_event:
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001547 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
Steven Rostedt168b6b12009-05-11 22:11:05 -04001548 if (unlikely(PTR_ERR(event) == -EAGAIN))
Steven Rostedtbf41a152008-10-04 02:00:59 -04001549 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001550
Steven Rostedtfa743952009-06-16 12:37:57 -04001551 if (!event)
1552 goto out_fail;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001553
Steven Rostedtfa743952009-06-16 12:37:57 -04001554 if (!rb_event_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001555 delta = 0;
1556
1557 event->time_delta = delta;
1558
1559 return event;
Steven Rostedtfa743952009-06-16 12:37:57 -04001560
1561 out_fail:
1562 rb_end_commit(cpu_buffer);
1563 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001564}
1565
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001566#define TRACE_RECURSIVE_DEPTH 16
Steven Rostedt261842b2009-04-16 21:41:52 -04001567
1568static int trace_recursive_lock(void)
1569{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001570 current->trace_recursion++;
Steven Rostedt261842b2009-04-16 21:41:52 -04001571
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001572 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1573 return 0;
Steven Rostedt261842b2009-04-16 21:41:52 -04001574
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001575 /* Disable all tracing before we do anything else */
1576 tracing_off_permanent();
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02001577
Steven Rostedt7d7d2b82009-04-27 12:37:49 -04001578 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001579 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1580 current->trace_recursion,
1581 hardirq_count() >> HARDIRQ_SHIFT,
1582 softirq_count() >> SOFTIRQ_SHIFT,
1583 in_nmi());
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02001584
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001585 WARN_ON_ONCE(1);
1586 return -1;
Steven Rostedt261842b2009-04-16 21:41:52 -04001587}
1588
1589static void trace_recursive_unlock(void)
1590{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001591 WARN_ON_ONCE(!current->trace_recursion);
Steven Rostedt261842b2009-04-16 21:41:52 -04001592
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001593 current->trace_recursion--;
Steven Rostedt261842b2009-04-16 21:41:52 -04001594}
1595
Steven Rostedtbf41a152008-10-04 02:00:59 -04001596static DEFINE_PER_CPU(int, rb_need_resched);
1597
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001598/**
1599 * ring_buffer_lock_reserve - reserve a part of the buffer
1600 * @buffer: the ring buffer to reserve from
1601 * @length: the length of the data to reserve (excluding event header)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001602 *
1603 * Returns a reserved event on the ring buffer to copy directly to.
1604 * The user of this interface will need to get the body to write into
1605 * and can use the ring_buffer_event_data() interface.
1606 *
1607 * The length is the length of the data needed, not the event length
1608 * which also includes the event header.
1609 *
1610 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1611 * If NULL is returned, then nothing has been allocated or locked.
1612 */
1613struct ring_buffer_event *
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001614ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001615{
1616 struct ring_buffer_per_cpu *cpu_buffer;
1617 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001618 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001619
Steven Rostedt033601a2008-11-21 12:41:55 -05001620 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001621 return NULL;
1622
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001623 if (atomic_read(&buffer->record_disabled))
1624 return NULL;
1625
Steven Rostedtbf41a152008-10-04 02:00:59 -04001626 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001627 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001628
Steven Rostedt261842b2009-04-16 21:41:52 -04001629 if (trace_recursive_lock())
1630 goto out_nocheck;
1631
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001632 cpu = raw_smp_processor_id();
1633
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301634 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001635 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001636
1637 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001638
1639 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001640 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001641
Steven Rostedtbe957c42009-05-11 14:42:53 -04001642 if (length > BUF_MAX_DATA_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001643 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001644
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001645 event = rb_reserve_next_event(cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001646 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001647 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001648
Steven Rostedtbf41a152008-10-04 02:00:59 -04001649 /*
1650 * Need to store resched state on this cpu.
1651 * Only the first needs to.
1652 */
1653
1654 if (preempt_count() == 1)
1655 per_cpu(rb_need_resched, cpu) = resched;
1656
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001657 return event;
1658
Steven Rostedtd7690412008-10-01 00:29:53 -04001659 out:
Steven Rostedt261842b2009-04-16 21:41:52 -04001660 trace_recursive_unlock();
1661
1662 out_nocheck:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001663 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001664 return NULL;
1665}
Robert Richterc4f50182008-12-11 16:49:22 +01001666EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
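
/*
 * A minimal usage sketch of the reserve/commit pair.  The buffer pointer
 * and the u32 payload below are illustrative assumptions, not part of
 * this file:
 *
 *        struct ring_buffer_event *event;
 *        u32 *body;
 *
 *        event = ring_buffer_lock_reserve(buffer, sizeof(u32));
 *        if (event) {
 *                body = ring_buffer_event_data(event);
 *                *body = my_payload;
 *                ring_buffer_unlock_commit(buffer, event);
 *        }
 *
 * If NULL was returned, nothing was reserved and no commit is needed.
 */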
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001667
1668static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1669 struct ring_buffer_event *event)
1670{
Steven Rostedte4906ef2009-04-30 20:49:44 -04001671 local_inc(&cpu_buffer->entries);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001672
Steven Rostedtfa743952009-06-16 12:37:57 -04001673 /*
1674	 * The first event in the commit queue updates the
1675 * time stamp.
1676 */
1677 if (rb_event_is_commit(cpu_buffer, event))
1678 cpu_buffer->write_stamp += event->time_delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001679
Steven Rostedtfa743952009-06-16 12:37:57 -04001680 rb_end_commit(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001681}
1682
1683/**
1684 * ring_buffer_unlock_commit - commit a reserved event
1685 * @buffer: The buffer to commit to
1686 * @event: The event pointer to commit.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001687 *
1688 * This commits the data to the ring buffer, and releases any locks held.
1689 *
1690 * Must be paired with ring_buffer_lock_reserve.
1691 */
1692int ring_buffer_unlock_commit(struct ring_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001693 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001694{
1695 struct ring_buffer_per_cpu *cpu_buffer;
1696 int cpu = raw_smp_processor_id();
1697
1698 cpu_buffer = buffer->buffers[cpu];
1699
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001700 rb_commit(cpu_buffer, event);
1701
Steven Rostedt261842b2009-04-16 21:41:52 -04001702 trace_recursive_unlock();
1703
Steven Rostedtbf41a152008-10-04 02:00:59 -04001704 /*
1705 * Only the last preempt count needs to restore preemption.
1706 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001707 if (preempt_count() == 1)
1708 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1709 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001710 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001711
1712 return 0;
1713}
Robert Richterc4f50182008-12-11 16:49:22 +01001714EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001715
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001716static inline void rb_event_discard(struct ring_buffer_event *event)
1717{
Lai Jiangshan334d4162009-04-24 11:27:05 +08001718 /* array[0] holds the actual length for the discarded event */
1719 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1720 event->type_len = RINGBUF_TYPE_PADDING;
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001721 /* time delta must be non zero */
1722 if (!event->time_delta)
1723 event->time_delta = 1;
1724}
1725
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001726/**
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001727 * ring_buffer_event_discard - discard any event in the ring buffer
1728 * @event: the event to discard
1729 *
1730 * Sometimes an event that is in the ring buffer needs to be ignored.
1731 * This function lets the user discard an event in the ring buffer
1732 * and then that event will not be read later.
1733 *
1734 * Note, it is up to the user to be careful with this, and protect
1735 * against races. If the user discards an event that has been consumed
1736 * it is possible that it could corrupt the ring buffer.
1737 */
1738void ring_buffer_event_discard(struct ring_buffer_event *event)
1739{
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001740 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001741}
1742EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1743
1744/**
1745 * ring_buffer_commit_discard - discard an event that has not been committed
1746 * @buffer: the ring buffer
1747 * @event: non committed event to discard
1748 *
1749 * This is similar to ring_buffer_event_discard but must only be
1750 * performed on an event that has not been committed yet. The difference
1751 * is that this will also try to free the event from the ring buffer
1752 * if another event has not been added behind it.
1753 *
1754 * If another event has been added behind it, it will set the event
1755 * up as discarded, and perform the commit.
1756 *
1757 * If this function is called, do not call ring_buffer_unlock_commit on
1758 * the event.
1759 */
1760void ring_buffer_discard_commit(struct ring_buffer *buffer,
1761 struct ring_buffer_event *event)
1762{
1763 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001764 int cpu;
1765
1766 /* The event is discarded regardless */
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001767 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001768
Steven Rostedtfa743952009-06-16 12:37:57 -04001769 cpu = smp_processor_id();
1770 cpu_buffer = buffer->buffers[cpu];
1771
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001772 /*
1773 * This must only be called if the event has not been
1774 * committed yet. Thus we can assume that preemption
1775 * is still disabled.
1776 */
Steven Rostedtfa743952009-06-16 12:37:57 -04001777 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001778
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001779 if (!rb_try_to_discard(cpu_buffer, event))
1780 goto out;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001781
1782 /*
1783	 * The commit is still visible to the reader, so we
1784 * must increment entries.
1785 */
Steven Rostedte4906ef2009-04-30 20:49:44 -04001786 local_inc(&cpu_buffer->entries);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001787 out:
Steven Rostedtfa743952009-06-16 12:37:57 -04001788 rb_end_commit(cpu_buffer);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001789
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001790 trace_recursive_unlock();
1791
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001792 /*
1793 * Only the last preempt count needs to restore preemption.
1794 */
1795 if (preempt_count() == 1)
1796 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1797 else
1798 preempt_enable_no_resched_notrace();
1799
1800}
1801EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
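
/*
 * Sketch of the reserve-then-discard path, for example when a filter
 * decides after filling the event that it should not be recorded.  Only
 * the ring buffer calls are real; the filter helper is an illustrative
 * assumption:
 *
 *        event = ring_buffer_lock_reserve(buffer, length);
 *        if (event) {
 *                if (my_filter_rejects(ring_buffer_event_data(event)))
 *                        ring_buffer_discard_commit(buffer, event);
 *                else
 *                        ring_buffer_unlock_commit(buffer, event);
 *        }
 */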
1802
1803/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001804 * ring_buffer_write - write data to the buffer without reserving
1805 * @buffer: The ring buffer to write to.
1806 * @length: The length of the data being written (excluding the event header)
1807 * @data: The data to write to the buffer.
1808 *
1809 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1810 * one function. If you already have the data to write to the buffer, it
1811 * may be easier to simply call this function.
1812 *
1813 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1814 * and not the length of the event which would hold the header.
1815 */
1816int ring_buffer_write(struct ring_buffer *buffer,
1817 unsigned long length,
1818 void *data)
1819{
1820 struct ring_buffer_per_cpu *cpu_buffer;
1821 struct ring_buffer_event *event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001822 void *body;
1823 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001824 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001825
Steven Rostedt033601a2008-11-21 12:41:55 -05001826 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001827 return -EBUSY;
1828
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001829 if (atomic_read(&buffer->record_disabled))
1830 return -EBUSY;
1831
Steven Rostedt182e9f52008-11-03 23:15:56 -05001832 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001833
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001834 cpu = raw_smp_processor_id();
1835
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301836 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001837 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001838
1839 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001840
1841 if (atomic_read(&cpu_buffer->record_disabled))
1842 goto out;
1843
Steven Rostedtbe957c42009-05-11 14:42:53 -04001844 if (length > BUF_MAX_DATA_SIZE)
1845 goto out;
1846
1847 event = rb_reserve_next_event(cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001848 if (!event)
1849 goto out;
1850
1851 body = rb_event_data(event);
1852
1853 memcpy(body, data, length);
1854
1855 rb_commit(cpu_buffer, event);
1856
1857 ret = 0;
1858 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001859 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001860
1861 return ret;
1862}
Robert Richterc4f50182008-12-11 16:49:22 +01001863EXPORT_SYMBOL_GPL(ring_buffer_write);
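
/*
 * Sketch of the one-shot write path for data that is already laid out in
 * memory; struct my_record and fill_my_record() are illustrative
 * assumptions:
 *
 *        struct my_record rec;
 *
 *        fill_my_record(&rec);
 *        if (ring_buffer_write(buffer, sizeof(rec), &rec) < 0)
 *                pr_debug("ring buffer full or disabled\n");
 */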
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001864
Andrew Morton34a148b2009-01-09 12:27:09 -08001865static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001866{
1867 struct buffer_page *reader = cpu_buffer->reader_page;
1868 struct buffer_page *head = cpu_buffer->head_page;
1869 struct buffer_page *commit = cpu_buffer->commit_page;
1870
1871 return reader->read == rb_page_commit(reader) &&
1872 (commit == reader ||
1873 (commit == head &&
1874 head->read == rb_page_commit(commit)));
1875}
1876
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001877/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001878 * ring_buffer_record_disable - stop all writes into the buffer
1879 * @buffer: The ring buffer to stop writes to.
1880 *
1881 * This prevents all writes to the buffer. Any attempt to write
1882 * to the buffer after this will fail and return NULL.
1883 *
1884 * The caller should call synchronize_sched() after this.
1885 */
1886void ring_buffer_record_disable(struct ring_buffer *buffer)
1887{
1888 atomic_inc(&buffer->record_disabled);
1889}
Robert Richterc4f50182008-12-11 16:49:22 +01001890EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001891
1892/**
1893 * ring_buffer_record_enable - enable writes to the buffer
1894 * @buffer: The ring buffer to enable writes
1895 *
1896 * Note, multiple disables will need the same number of enables
1897 * to truly enable the writing (much like preempt_disable).
1898 */
1899void ring_buffer_record_enable(struct ring_buffer *buffer)
1900{
1901 atomic_dec(&buffer->record_disabled);
1902}
Robert Richterc4f50182008-12-11 16:49:22 +01001903EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
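
/*
 * Typical pattern for temporarily quiescing writers, for example before
 * reading or resetting the buffer from a context that cannot tolerate
 * concurrent writes (a sketch only, error handling omitted):
 *
 *        ring_buffer_record_disable(buffer);
 *        synchronize_sched();
 *        ... read or reset the buffer here ...
 *        ring_buffer_record_enable(buffer);
 *
 * The synchronize_sched() lets any writer that already passed the
 * record_disabled check finish before the buffer is touched.
 */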
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001904
1905/**
1906 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1907 * @buffer: The ring buffer to stop writes to.
1908 * @cpu: The CPU buffer to stop
1909 *
1910 * This prevents all writes to the buffer. Any attempt to write
1911 * to the buffer after this will fail and return NULL.
1912 *
1913 * The caller should call synchronize_sched() after this.
1914 */
1915void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1916{
1917 struct ring_buffer_per_cpu *cpu_buffer;
1918
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301919 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001920 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001921
1922 cpu_buffer = buffer->buffers[cpu];
1923 atomic_inc(&cpu_buffer->record_disabled);
1924}
Robert Richterc4f50182008-12-11 16:49:22 +01001925EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001926
1927/**
1928 * ring_buffer_record_enable_cpu - enable writes to the buffer
1929 * @buffer: The ring buffer to enable writes
1930 * @cpu: The CPU to enable.
1931 *
1932 * Note, multiple disables will need the same number of enables
1933 * to truly enable the writing (much like preempt_disable).
1934 */
1935void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1936{
1937 struct ring_buffer_per_cpu *cpu_buffer;
1938
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301939 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001940 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001941
1942 cpu_buffer = buffer->buffers[cpu];
1943 atomic_dec(&cpu_buffer->record_disabled);
1944}
Robert Richterc4f50182008-12-11 16:49:22 +01001945EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001946
1947/**
1948 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1949 * @buffer: The ring buffer
1950 * @cpu: The per CPU buffer to get the entries from.
1951 */
1952unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1953{
1954 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001955 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001956
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301957 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001958 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001959
1960 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04001961 ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1962 - cpu_buffer->read;
Steven Rostedt554f7862009-03-11 22:00:13 -04001963
1964 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001965}
Robert Richterc4f50182008-12-11 16:49:22 +01001966EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001967
1968/**
1969 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1970 * @buffer: The ring buffer
1971 * @cpu: The per CPU buffer to get the number of overruns from
1972 */
1973unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1974{
1975 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001976 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001977
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301978 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001979 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001980
1981 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001982 ret = cpu_buffer->overrun;
Steven Rostedt554f7862009-03-11 22:00:13 -04001983
1984 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001985}
Robert Richterc4f50182008-12-11 16:49:22 +01001986EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001987
1988/**
Steven Rostedtf0d2c682009-04-29 13:43:37 -04001989 * ring_buffer_nmi_dropped_cpu - get the number of NMIs that were dropped
1990 * @buffer: The ring buffer
1991 * @cpu: The per CPU buffer to get the number of dropped NMI events from
1992 */
1993unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
1994{
1995 struct ring_buffer_per_cpu *cpu_buffer;
1996 unsigned long ret;
1997
1998 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1999 return 0;
2000
2001 cpu_buffer = buffer->buffers[cpu];
2002 ret = cpu_buffer->nmi_dropped;
2003
2004 return ret;
2005}
2006EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
2007
2008/**
2009 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2010 * @buffer: The ring buffer
2011 * @cpu: The per CPU buffer to get the number of overruns from
2012 */
2013unsigned long
2014ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2015{
2016 struct ring_buffer_per_cpu *cpu_buffer;
2017 unsigned long ret;
2018
2019 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2020 return 0;
2021
2022 cpu_buffer = buffer->buffers[cpu];
2023 ret = cpu_buffer->commit_overrun;
2024
2025 return ret;
2026}
2027EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2028
2029/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002030 * ring_buffer_entries - get the number of entries in a buffer
2031 * @buffer: The ring buffer
2032 *
2033 * Returns the total number of entries in the ring buffer
2034 * (all CPU entries)
2035 */
2036unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2037{
2038 struct ring_buffer_per_cpu *cpu_buffer;
2039 unsigned long entries = 0;
2040 int cpu;
2041
2042 /* if you care about this being correct, lock the buffer */
2043 for_each_buffer_cpu(buffer, cpu) {
2044 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04002045 entries += (local_read(&cpu_buffer->entries) -
2046 cpu_buffer->overrun) - cpu_buffer->read;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002047 }
2048
2049 return entries;
2050}
Robert Richterc4f50182008-12-11 16:49:22 +01002051EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002052
2053/**
2054 * ring_buffer_overruns - get the number of overruns in the buffer
2055 * @buffer: The ring buffer
2056 *
2057 * Returns the total number of overruns in the ring buffer
2058 * (all CPU entries)
2059 */
2060unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2061{
2062 struct ring_buffer_per_cpu *cpu_buffer;
2063 unsigned long overruns = 0;
2064 int cpu;
2065
2066 /* if you care about this being correct, lock the buffer */
2067 for_each_buffer_cpu(buffer, cpu) {
2068 cpu_buffer = buffer->buffers[cpu];
2069 overruns += cpu_buffer->overrun;
2070 }
2071
2072 return overruns;
2073}
Robert Richterc4f50182008-12-11 16:49:22 +01002074EXPORT_SYMBOL_GPL(ring_buffer_overruns);
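
/*
 * Sketch of dumping the per-cpu statistics exposed above; the loop and
 * printk format are illustrative only:
 *
 *        int cpu;
 *
 *        for_each_online_cpu(cpu)
 *                printk(KERN_INFO "cpu %d: %lu entries, %lu overruns\n",
 *                       cpu,
 *                       ring_buffer_entries_cpu(buffer, cpu),
 *                       ring_buffer_overrun_cpu(buffer, cpu));
 */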
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002075
Steven Rostedt642edba2008-11-12 00:01:26 -05002076static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002077{
2078 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2079
Steven Rostedtd7690412008-10-01 00:29:53 -04002080 /* Iterator usage is expected to have record disabled */
2081 if (list_empty(&cpu_buffer->reader_page->list)) {
2082 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002083 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002084 } else {
2085 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002086 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002087 }
2088 if (iter->head)
2089 iter->read_stamp = cpu_buffer->read_stamp;
2090 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05002091 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05002092}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002093
Steven Rostedt642edba2008-11-12 00:01:26 -05002094/**
2095 * ring_buffer_iter_reset - reset an iterator
2096 * @iter: The iterator to reset
2097 *
2098 * Resets the iterator, so that it will start from the beginning
2099 * again.
2100 */
2101void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2102{
Steven Rostedt554f7862009-03-11 22:00:13 -04002103 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt642edba2008-11-12 00:01:26 -05002104 unsigned long flags;
2105
Steven Rostedt554f7862009-03-11 22:00:13 -04002106 if (!iter)
2107 return;
2108
2109 cpu_buffer = iter->cpu_buffer;
2110
Steven Rostedt642edba2008-11-12 00:01:26 -05002111 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2112 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002113 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002114}
Robert Richterc4f50182008-12-11 16:49:22 +01002115EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002116
2117/**
2118 * ring_buffer_iter_empty - check if an iterator has no more to read
2119 * @iter: The iterator to check
2120 */
2121int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2122{
2123 struct ring_buffer_per_cpu *cpu_buffer;
2124
2125 cpu_buffer = iter->cpu_buffer;
2126
Steven Rostedtbf41a152008-10-04 02:00:59 -04002127 return iter->head_page == cpu_buffer->commit_page &&
2128 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002129}
Robert Richterc4f50182008-12-11 16:49:22 +01002130EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002131
2132static void
2133rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2134 struct ring_buffer_event *event)
2135{
2136 u64 delta;
2137
Lai Jiangshan334d4162009-04-24 11:27:05 +08002138 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002139 case RINGBUF_TYPE_PADDING:
2140 return;
2141
2142 case RINGBUF_TYPE_TIME_EXTEND:
2143 delta = event->array[0];
2144 delta <<= TS_SHIFT;
2145 delta += event->time_delta;
2146 cpu_buffer->read_stamp += delta;
2147 return;
2148
2149 case RINGBUF_TYPE_TIME_STAMP:
2150 /* FIXME: not implemented */
2151 return;
2152
2153 case RINGBUF_TYPE_DATA:
2154 cpu_buffer->read_stamp += event->time_delta;
2155 return;
2156
2157 default:
2158 BUG();
2159 }
2160 return;
2161}
2162
2163static void
2164rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2165 struct ring_buffer_event *event)
2166{
2167 u64 delta;
2168
Lai Jiangshan334d4162009-04-24 11:27:05 +08002169 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002170 case RINGBUF_TYPE_PADDING:
2171 return;
2172
2173 case RINGBUF_TYPE_TIME_EXTEND:
2174 delta = event->array[0];
2175 delta <<= TS_SHIFT;
2176 delta += event->time_delta;
2177 iter->read_stamp += delta;
2178 return;
2179
2180 case RINGBUF_TYPE_TIME_STAMP:
2181 /* FIXME: not implemented */
2182 return;
2183
2184 case RINGBUF_TYPE_DATA:
2185 iter->read_stamp += event->time_delta;
2186 return;
2187
2188 default:
2189 BUG();
2190 }
2191 return;
2192}
2193
Steven Rostedtd7690412008-10-01 00:29:53 -04002194static struct buffer_page *
2195rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002196{
Steven Rostedtd7690412008-10-01 00:29:53 -04002197 struct buffer_page *reader = NULL;
2198 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002199 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002200
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002201 local_irq_save(flags);
2202 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04002203
2204 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002205 /*
2206 * This should normally only loop twice. But because the
2207 * start of the reader inserts an empty page, it causes
2208 * a case where we will loop three times. There should be no
2209 * reason to loop four times (that I know of).
2210 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002211 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002212 reader = NULL;
2213 goto out;
2214 }
2215
Steven Rostedtd7690412008-10-01 00:29:53 -04002216 reader = cpu_buffer->reader_page;
2217
2218 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002219 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04002220 goto out;
2221
2222 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002223 if (RB_WARN_ON(cpu_buffer,
2224 cpu_buffer->reader_page->read > rb_page_size(reader)))
2225 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04002226
2227 /* check if we caught up to the tail */
2228 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002229 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04002230 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002231
2232 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04002233 * Splice the empty reader page into the list around the head.
2234 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002235 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002236
Steven Rostedtd7690412008-10-01 00:29:53 -04002237 reader = cpu_buffer->head_page;
2238 cpu_buffer->reader_page->list.next = reader->list.next;
2239 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002240
2241 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002242 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002243 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04002244
2245 /* Make the reader page now replace the head */
2246 reader->list.prev->next = &cpu_buffer->reader_page->list;
2247 reader->list.next->prev = &cpu_buffer->reader_page->list;
2248
2249 /*
2250 * If the tail is on the reader, then we must set the head
2251 * to the inserted page, otherwise we set it one before.
2252 */
2253 cpu_buffer->head_page = cpu_buffer->reader_page;
2254
Steven Rostedtbf41a152008-10-04 02:00:59 -04002255 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04002256 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2257
2258 /* Finally update the reader page to the new head */
2259 cpu_buffer->reader_page = reader;
2260 rb_reset_reader_page(cpu_buffer);
2261
2262 goto again;
2263
2264 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002265 __raw_spin_unlock(&cpu_buffer->lock);
2266 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04002267
2268 return reader;
2269}
2270
2271static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2272{
2273 struct ring_buffer_event *event;
2274 struct buffer_page *reader;
2275 unsigned length;
2276
2277 reader = rb_get_reader_page(cpu_buffer);
2278
2279 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002280 if (RB_WARN_ON(cpu_buffer, !reader))
2281 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002282
2283 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002284
Lai Jiangshan334d4162009-04-24 11:27:05 +08002285 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2286 || rb_discarded_event(event))
Steven Rostedte4906ef2009-04-30 20:49:44 -04002287 cpu_buffer->read++;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002288
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002289 rb_update_read_stamp(cpu_buffer, event);
2290
Steven Rostedtd7690412008-10-01 00:29:53 -04002291 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002292 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002293}
2294
2295static void rb_advance_iter(struct ring_buffer_iter *iter)
2296{
2297 struct ring_buffer *buffer;
2298 struct ring_buffer_per_cpu *cpu_buffer;
2299 struct ring_buffer_event *event;
2300 unsigned length;
2301
2302 cpu_buffer = iter->cpu_buffer;
2303 buffer = cpu_buffer->buffer;
2304
2305 /*
2306 * Check if we are at the end of the buffer.
2307 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002308 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedtea05b572009-06-03 09:30:10 -04002309 /* discarded commits can make the page empty */
2310 if (iter->head_page == cpu_buffer->commit_page)
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002311 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002312 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002313 return;
2314 }
2315
2316 event = rb_iter_head_event(iter);
2317
2318 length = rb_event_length(event);
2319
2320 /*
2321	 * This should not be called to advance the iterator if we are
2322 * at the tail of the buffer.
2323 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002324 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05002325 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002326 (iter->head + length > rb_commit_index(cpu_buffer))))
2327 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002328
2329 rb_update_iter_read_stamp(iter, event);
2330
2331 iter->head += length;
2332
2333 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002334 if ((iter->head >= rb_page_size(iter->head_page)) &&
2335 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002336 rb_advance_iter(iter);
2337}
2338
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002339static struct ring_buffer_event *
2340rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002341{
2342 struct ring_buffer_per_cpu *cpu_buffer;
2343 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04002344 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002345 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002346
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002347 cpu_buffer = buffer->buffers[cpu];
2348
2349 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002350 /*
2351 * We repeat when a timestamp is encountered. It is possible
2352 * to get multiple timestamps from an interrupt entering just
Steven Rostedtea05b572009-06-03 09:30:10 -04002353 * as one timestamp is about to be written, or from discarded
2354 * commits. The most that we can have is the number on a single page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002355 */
Steven Rostedtea05b572009-06-03 09:30:10 -04002356 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002357 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002358
Steven Rostedtd7690412008-10-01 00:29:53 -04002359 reader = rb_get_reader_page(cpu_buffer);
2360 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002361 return NULL;
2362
Steven Rostedtd7690412008-10-01 00:29:53 -04002363 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002364
Lai Jiangshan334d4162009-04-24 11:27:05 +08002365 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002366 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002367 if (rb_null_event(event))
2368 RB_WARN_ON(cpu_buffer, 1);
2369 /*
2370 * Because the writer could be discarding every
2371 * event it creates (which would probably be bad)
2372 * if we were to go back to "again" then we may never
2373 * catch up, and will trigger the warn on, or lock
2374 * the box. Return the padding, and we will release
2375 * the current locks, and try again.
2376 */
Steven Rostedtd7690412008-10-01 00:29:53 -04002377 rb_advance_reader(cpu_buffer);
Tom Zanussi2d622712009-03-22 03:30:49 -05002378 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002379
2380 case RINGBUF_TYPE_TIME_EXTEND:
2381 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04002382 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002383 goto again;
2384
2385 case RINGBUF_TYPE_TIME_STAMP:
2386 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04002387 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002388 goto again;
2389
2390 case RINGBUF_TYPE_DATA:
2391 if (ts) {
2392 *ts = cpu_buffer->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002393 ring_buffer_normalize_time_stamp(buffer,
2394 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002395 }
2396 return event;
2397
2398 default:
2399 BUG();
2400 }
2401
2402 return NULL;
2403}
Robert Richterc4f50182008-12-11 16:49:22 +01002404EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002405
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002406static struct ring_buffer_event *
2407rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002408{
2409 struct ring_buffer *buffer;
2410 struct ring_buffer_per_cpu *cpu_buffer;
2411 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002412 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002413
2414 if (ring_buffer_iter_empty(iter))
2415 return NULL;
2416
2417 cpu_buffer = iter->cpu_buffer;
2418 buffer = cpu_buffer->buffer;
2419
2420 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002421 /*
Steven Rostedtea05b572009-06-03 09:30:10 -04002422 * We repeat when a timestamp is encountered.
2423 * We can get multiple timestamps by nested interrupts or also
2424 * if filtering is on (discarding commits). Since discarding
2425 * commits can be frequent we can get a lot of timestamps.
2426 * But we limit them by not adding timestamps if they begin
2427 * at the start of a page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002428 */
Steven Rostedtea05b572009-06-03 09:30:10 -04002429 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002430 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002431
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002432 if (rb_per_cpu_empty(cpu_buffer))
2433 return NULL;
2434
2435 event = rb_iter_head_event(iter);
2436
Lai Jiangshan334d4162009-04-24 11:27:05 +08002437 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002438 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002439 if (rb_null_event(event)) {
2440 rb_inc_iter(iter);
2441 goto again;
2442 }
2443 rb_advance_iter(iter);
2444 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002445
2446 case RINGBUF_TYPE_TIME_EXTEND:
2447 /* Internal data, OK to advance */
2448 rb_advance_iter(iter);
2449 goto again;
2450
2451 case RINGBUF_TYPE_TIME_STAMP:
2452 /* FIXME: not implemented */
2453 rb_advance_iter(iter);
2454 goto again;
2455
2456 case RINGBUF_TYPE_DATA:
2457 if (ts) {
2458 *ts = iter->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002459 ring_buffer_normalize_time_stamp(buffer,
2460 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002461 }
2462 return event;
2463
2464 default:
2465 BUG();
2466 }
2467
2468 return NULL;
2469}
Robert Richterc4f50182008-12-11 16:49:22 +01002470EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002471
Steven Rostedt8d707e82009-06-16 21:22:48 -04002472static inline int rb_ok_to_lock(void)
2473{
2474 /*
2475	 * If an NMI die dump is reading out the content of the ring buffer,
2476	 * do not grab locks. We also permanently disable the ring
2477	 * buffer. A one-time deal is all you get from reading
2478	 * the ring buffer from an NMI.
2479 */
2480 if (likely(!in_nmi() && !oops_in_progress))
2481 return 1;
2482
2483 tracing_off_permanent();
2484 return 0;
2485}
2486
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002487/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002488 * ring_buffer_peek - peek at the next event to be read
2489 * @buffer: The ring buffer to read
2490 * @cpu: The cpu to peek at
2491 * @ts: The timestamp counter of this event.
2492 *
2493 * This will return the event that will be read next, but does
2494 * not consume the data.
2495 */
2496struct ring_buffer_event *
2497ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2498{
2499 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8aabee52009-03-12 13:13:49 -04002500 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002501 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002502 int dolock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002503
Steven Rostedt554f7862009-03-11 22:00:13 -04002504 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002505 return NULL;
Steven Rostedt554f7862009-03-11 22:00:13 -04002506
Steven Rostedt8d707e82009-06-16 21:22:48 -04002507 dolock = rb_ok_to_lock();
Tom Zanussi2d622712009-03-22 03:30:49 -05002508 again:
Steven Rostedt8d707e82009-06-16 21:22:48 -04002509 local_irq_save(flags);
2510 if (dolock)
2511 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002512 event = rb_buffer_peek(buffer, cpu, ts);
Steven Rostedt8d707e82009-06-16 21:22:48 -04002513 if (dolock)
2514 spin_unlock(&cpu_buffer->reader_lock);
2515 local_irq_restore(flags);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002516
Lai Jiangshan334d4162009-04-24 11:27:05 +08002517 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002518 cpu_relax();
2519 goto again;
2520 }
2521
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002522 return event;
2523}
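
/*
 * Sketch of a non-destructive poll: peek to see whether anything is
 * pending on a cpu without consuming it.  The wakeup helper is an
 * illustrative assumption:
 *
 *        u64 ts;
 *
 *        if (ring_buffer_peek(buffer, cpu, &ts))
 *                wake_up_my_reader();
 */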
2524
2525/**
2526 * ring_buffer_iter_peek - peek at the next event to be read
2527 * @iter: The ring buffer iterator
2528 * @ts: The timestamp counter of this event.
2529 *
2530 * This will return the event that will be read next, but does
2531 * not increment the iterator.
2532 */
2533struct ring_buffer_event *
2534ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2535{
2536 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2537 struct ring_buffer_event *event;
2538 unsigned long flags;
2539
Tom Zanussi2d622712009-03-22 03:30:49 -05002540 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002541 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2542 event = rb_iter_peek(iter, ts);
2543 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2544
Lai Jiangshan334d4162009-04-24 11:27:05 +08002545 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002546 cpu_relax();
2547 goto again;
2548 }
2549
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002550 return event;
2551}
2552
2553/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002554 * ring_buffer_consume - return an event and consume it
2555 * @buffer: The ring buffer to get the next event from
2556 *
2557 * Returns the next event in the ring buffer, and that event is consumed.
2558 * Meaning that sequential reads will keep returning a different event,
2559 * and eventually empty the ring buffer if the producer is slower.
2560 */
2561struct ring_buffer_event *
2562ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2563{
Steven Rostedt554f7862009-03-11 22:00:13 -04002564 struct ring_buffer_per_cpu *cpu_buffer;
2565 struct ring_buffer_event *event = NULL;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002566 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002567 int dolock;
2568
2569 dolock = rb_ok_to_lock();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002570
Tom Zanussi2d622712009-03-22 03:30:49 -05002571 again:
Steven Rostedt554f7862009-03-11 22:00:13 -04002572 /* might be called in atomic */
2573 preempt_disable();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002574
Steven Rostedt554f7862009-03-11 22:00:13 -04002575 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2576 goto out;
2577
2578 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04002579 local_irq_save(flags);
2580 if (dolock)
2581 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002582
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002583 event = rb_buffer_peek(buffer, cpu, ts);
2584 if (!event)
Steven Rostedt554f7862009-03-11 22:00:13 -04002585 goto out_unlock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002586
Steven Rostedtd7690412008-10-01 00:29:53 -04002587 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002588
Steven Rostedt554f7862009-03-11 22:00:13 -04002589 out_unlock:
Steven Rostedt8d707e82009-06-16 21:22:48 -04002590 if (dolock)
2591 spin_unlock(&cpu_buffer->reader_lock);
2592 local_irq_restore(flags);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002593
Steven Rostedt554f7862009-03-11 22:00:13 -04002594 out:
2595 preempt_enable();
2596
Lai Jiangshan334d4162009-04-24 11:27:05 +08002597 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002598 cpu_relax();
2599 goto again;
2600 }
2601
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002602 return event;
2603}
Robert Richterc4f50182008-12-11 16:49:22 +01002604EXPORT_SYMBOL_GPL(ring_buffer_consume);
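
/*
 * Sketch of a consuming read loop that drains one cpu buffer; the
 * handle_event() callback is an illustrative assumption:
 *
 *        struct ring_buffer_event *event;
 *        u64 ts;
 *
 *        while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *                handle_event(ring_buffer_event_data(event),
 *                             ring_buffer_event_length(event), ts);
 */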
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002605
2606/**
2607 * ring_buffer_read_start - start a non consuming read of the buffer
2608 * @buffer: The ring buffer to read from
2609 * @cpu: The cpu buffer to iterate over
2610 *
2611 * This starts up an iteration through the buffer. It also disables
2612 * the recording to the buffer until the reading is finished.
2613 * This prevents the reading from being corrupted. This is not
2614 * a consuming read, so a producer is not expected.
2615 *
2616 * Must be paired with ring_buffer_read_finish.
2617 */
2618struct ring_buffer_iter *
2619ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2620{
2621 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002622 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002623 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002624
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302625 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002626 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002627
2628 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2629 if (!iter)
Steven Rostedt8aabee52009-03-12 13:13:49 -04002630 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002631
2632 cpu_buffer = buffer->buffers[cpu];
2633
2634 iter->cpu_buffer = cpu_buffer;
2635
2636 atomic_inc(&cpu_buffer->record_disabled);
2637 synchronize_sched();
2638
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002639 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002640 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002641 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002642 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002643 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002644
2645 return iter;
2646}
Robert Richterc4f50182008-12-11 16:49:22 +01002647EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002648
2649/**
2650 * ring_buffer_read_finish - finish reading the iterator of the buffer
2651 * @iter: The iterator retrieved by ring_buffer_read_start
2652 *
2653 * This re-enables the recording to the buffer, and frees the
2654 * iterator.
2655 */
2656void
2657ring_buffer_read_finish(struct ring_buffer_iter *iter)
2658{
2659 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2660
2661 atomic_dec(&cpu_buffer->record_disabled);
2662 kfree(iter);
2663}
Robert Richterc4f50182008-12-11 16:49:22 +01002664EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002665
2666/**
2667 * ring_buffer_read - read the next item in the ring buffer by the iterator
2668 * @iter: The ring buffer iterator
2669 * @ts: The time stamp of the event read.
2670 *
2671 * This reads the next event in the ring buffer and increments the iterator.
2672 */
2673struct ring_buffer_event *
2674ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2675{
2676 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002677 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2678 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002679
Tom Zanussi2d622712009-03-22 03:30:49 -05002680 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002681 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2682 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002683 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002684 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002685
2686 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002687 out:
2688 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002689
Lai Jiangshan334d4162009-04-24 11:27:05 +08002690 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002691 cpu_relax();
2692 goto again;
2693 }
2694
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002695 return event;
2696}
Robert Richterc4f50182008-12-11 16:49:22 +01002697EXPORT_SYMBOL_GPL(ring_buffer_read);
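
/*
 * A minimal usage sketch for the iterator API above (illustration only,
 * not part of this file).  The function name dump_cpu_events() and the
 * pr_info() output are hypothetical.  Note that ring_buffer_read_start()
 * keeps recording on that CPU disabled until ring_buffer_read_finish()
 * is called.
 *
 *	static void dump_cpu_events(struct ring_buffer *buffer, int cpu)
 *	{
 *		struct ring_buffer_iter *iter;
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		iter = ring_buffer_read_start(buffer, cpu);
 *		if (!iter)
 *			return;
 *
 *		while ((event = ring_buffer_read(iter, &ts))) {
 *			pr_info("cpu %d ts %llu len %u\n", cpu,
 *				(unsigned long long)ts,
 *				ring_buffer_event_length(event));
 *		}
 *
 *		ring_buffer_read_finish(iter);
 *	}
 */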
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002698
2699/**
2700 * ring_buffer_size - return the size of one per-CPU buffer (in bytes)
2701 * @buffer: The ring buffer.
2702 */
2703unsigned long ring_buffer_size(struct ring_buffer *buffer)
2704{
2705 return BUF_PAGE_SIZE * buffer->pages;
2706}
Robert Richterc4f50182008-12-11 16:49:22 +01002707EXPORT_SYMBOL_GPL(ring_buffer_size);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002708
2709static void
2710rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2711{
2712 cpu_buffer->head_page
2713 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002714 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002715 local_set(&cpu_buffer->head_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002716 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002717
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002718 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002719
2720 cpu_buffer->tail_page = cpu_buffer->head_page;
2721 cpu_buffer->commit_page = cpu_buffer->head_page;
2722
2723 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2724 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002725 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002726 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002727 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002728
Steven Rostedtf0d2c682009-04-29 13:43:37 -04002729 cpu_buffer->nmi_dropped = 0;
2730 cpu_buffer->commit_overrun = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002731 cpu_buffer->overrun = 0;
Steven Rostedte4906ef2009-04-30 20:49:44 -04002732 cpu_buffer->read = 0;
2733 local_set(&cpu_buffer->entries, 0);
Steven Rostedtfa743952009-06-16 12:37:57 -04002734 local_set(&cpu_buffer->committing, 0);
2735 local_set(&cpu_buffer->commits, 0);
Steven Rostedt69507c02009-01-21 18:45:57 -05002736
2737 cpu_buffer->write_stamp = 0;
2738 cpu_buffer->read_stamp = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002739}
2740
2741/**
2742 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2743 * @buffer: The ring buffer to reset a per cpu buffer of
2744 * @cpu: The CPU buffer to be reset
2745 */
2746void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2747{
2748 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2749 unsigned long flags;
2750
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302751 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002752 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002753
Steven Rostedt41ede232009-05-01 20:26:54 -04002754 atomic_inc(&cpu_buffer->record_disabled);
2755
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002756 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2757
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002758 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002759
2760 rb_reset_cpu(cpu_buffer);
2761
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002762 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002763
2764 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt41ede232009-05-01 20:26:54 -04002765
2766 atomic_dec(&cpu_buffer->record_disabled);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002767}
Robert Richterc4f50182008-12-11 16:49:22 +01002768EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002769
2770/**
2771 * ring_buffer_reset - reset a ring buffer
2772 * @buffer: The ring buffer whose per-CPU buffers will be reset
2773 */
2774void ring_buffer_reset(struct ring_buffer *buffer)
2775{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002776 int cpu;
2777
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002778 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002779 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002780}
Robert Richterc4f50182008-12-11 16:49:22 +01002781EXPORT_SYMBOL_GPL(ring_buffer_reset);
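
/*
 * Sketch of how a tracer might clear its buffer between runs
 * (illustration only, not part of this file; clear_trace_data() is a
 * hypothetical name).  The explicit record disable is belt and braces:
 * ring_buffer_reset_cpu() already disables recording per CPU, but
 * disabling the whole buffer first keeps writers on other CPUs from
 * adding events while the per-CPU resets are in progress.
 *
 *	static void clear_trace_data(struct ring_buffer *buffer)
 *	{
 *		ring_buffer_record_disable(buffer);
 *		synchronize_sched();
 *		ring_buffer_reset(buffer);
 *		ring_buffer_record_enable(buffer);
 *	}
 */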
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002782
2783/**
2784 * ring_buffer_empty - is the ring buffer empty?
2785 * @buffer: The ring buffer to test
2786 */
2787int ring_buffer_empty(struct ring_buffer *buffer)
2788{
2789 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtd4788202009-06-17 00:39:43 -04002790 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002791 int dolock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002792 int cpu;
Steven Rostedtd4788202009-06-17 00:39:43 -04002793 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002794
Steven Rostedt8d707e82009-06-16 21:22:48 -04002795 dolock = rb_ok_to_lock();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002796
2797 /* yes this is racy, but if you don't like the race, lock the buffer */
2798 for_each_buffer_cpu(buffer, cpu) {
2799 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04002800 local_irq_save(flags);
2801 if (dolock)
2802 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedtd4788202009-06-17 00:39:43 -04002803 ret = rb_per_cpu_empty(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04002804 if (dolock)
2805 spin_unlock(&cpu_buffer->reader_lock);
2806 local_irq_restore(flags);
2807
Steven Rostedtd4788202009-06-17 00:39:43 -04002808 if (!ret)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002809 return 0;
2810 }
Steven Rostedt554f7862009-03-11 22:00:13 -04002811
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002812 return 1;
2813}
Robert Richterc4f50182008-12-11 16:49:22 +01002814EXPORT_SYMBOL_GPL(ring_buffer_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002815
2816/**
2817 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2818 * @buffer: The ring buffer
2819 * @cpu: The CPU buffer to test
2820 */
2821int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2822{
2823 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtd4788202009-06-17 00:39:43 -04002824 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002825 int dolock;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002826 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002827
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302828 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002829 return 1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002830
Steven Rostedt8d707e82009-06-16 21:22:48 -04002831 dolock = rb_ok_to_lock();
Steven Rostedt554f7862009-03-11 22:00:13 -04002832
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002833 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04002834 local_irq_save(flags);
2835 if (dolock)
2836 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedt554f7862009-03-11 22:00:13 -04002837 ret = rb_per_cpu_empty(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04002838 if (dolock)
2839 spin_unlock(&cpu_buffer->reader_lock);
2840 local_irq_restore(flags);
Steven Rostedt554f7862009-03-11 22:00:13 -04002841
2842 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002843}
Robert Richterc4f50182008-12-11 16:49:22 +01002844EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
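
/*
 * Sketch of a consumer that drains a single CPU buffer (illustration
 * only, not part of this file; drain_cpu_buffer() is a hypothetical
 * name).  The emptiness check is just a hint: as noted above, the
 * result is racy unless the caller locks out the writer.
 *
 *	static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
 *	{
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		while (!ring_buffer_empty_cpu(buffer, cpu)) {
 *			event = ring_buffer_consume(buffer, cpu, &ts);
 *			if (!event)
 *				break;
 *			pr_info("consumed %u bytes at %llu\n",
 *				ring_buffer_event_length(event),
 *				(unsigned long long)ts);
 *		}
 *	}
 */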
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002845
2846/**
2847 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2848 * @buffer_a: One buffer to swap with
2849 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
2850 *
2851 * This function is useful for tracers that want to take a "snapshot"
2852 * of a CPU buffer and have another backup buffer lying around.
2853 * It is expected that the tracer handles the CPU buffer not being
2854 * used at the moment.
2855 */
2856int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2857 struct ring_buffer *buffer_b, int cpu)
2858{
2859 struct ring_buffer_per_cpu *cpu_buffer_a;
2860 struct ring_buffer_per_cpu *cpu_buffer_b;
Steven Rostedt554f7862009-03-11 22:00:13 -04002861 int ret = -EINVAL;
2862
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302863 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2864 !cpumask_test_cpu(cpu, buffer_b->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04002865 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002866
2867 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08002868 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt554f7862009-03-11 22:00:13 -04002869 goto out;
2870
2871 ret = -EAGAIN;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002872
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002873 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedt554f7862009-03-11 22:00:13 -04002874 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002875
2876 if (atomic_read(&buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002877 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002878
2879 if (atomic_read(&buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002880 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002881
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002882 cpu_buffer_a = buffer_a->buffers[cpu];
2883 cpu_buffer_b = buffer_b->buffers[cpu];
2884
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002885 if (atomic_read(&cpu_buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002886 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002887
2888 if (atomic_read(&cpu_buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002889 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002890
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002891 /*
2892 * We can't do a synchronize_sched here because this
2893 * function can be called in atomic context.
2894 * Normally this will be called from the same CPU as cpu.
2895 * If not it's up to the caller to protect this.
2896 */
2897 atomic_inc(&cpu_buffer_a->record_disabled);
2898 atomic_inc(&cpu_buffer_b->record_disabled);
2899
2900 buffer_a->buffers[cpu] = cpu_buffer_b;
2901 buffer_b->buffers[cpu] = cpu_buffer_a;
2902
2903 cpu_buffer_b->buffer = buffer_a;
2904 cpu_buffer_a->buffer = buffer_b;
2905
2906 atomic_dec(&cpu_buffer_a->record_disabled);
2907 atomic_dec(&cpu_buffer_b->record_disabled);
2908
Steven Rostedt554f7862009-03-11 22:00:13 -04002909 ret = 0;
2910out:
Steven Rostedt554f7862009-03-11 22:00:13 -04002911 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002912}
Robert Richterc4f50182008-12-11 16:49:22 +01002913EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
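
/*
 * Snapshot sketch in the spirit of the kernel-doc above (illustration
 * only, not part of this file).  "live" and "spare" are hypothetical
 * buffers that the caller allocated with the same size, and
 * snapshot_cpu() is a made-up name.  After a successful swap the old
 * events can be read from "spare" while "live" keeps recording into
 * the pages that previously belonged to "spare".
 *
 *	static int snapshot_cpu(struct ring_buffer *live,
 *				struct ring_buffer *spare, int cpu)
 *	{
 *		int ret;
 *
 *		ret = ring_buffer_swap_cpu(live, spare, cpu);
 *		if (ret < 0)
 *			return ret;
 *
 *		return 0;
 *	}
 */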
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002914
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002915/**
2916 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2917 * @buffer: the buffer to allocate for.
2918 *
2919 * This function is used in conjunction with ring_buffer_read_page.
2920 * When reading a full page from the ring buffer, these functions
2921 * can be used to speed up the process. The calling function should
2922 * allocate a few pages first with this function. Then when it
2923 * needs to get pages from the ring buffer, it passes the result
2924 * of this function into ring_buffer_read_page, which will swap
2925 * the page that was allocated with the read page of the buffer.
2926 *
2927 * Returns:
2928 * The page allocated, or NULL on error.
2929 */
2930void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2931{
Steven Rostedt044fa782008-12-02 23:50:03 -05002932 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002933 unsigned long addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002934
2935 addr = __get_free_page(GFP_KERNEL);
2936 if (!addr)
2937 return NULL;
2938
Steven Rostedt044fa782008-12-02 23:50:03 -05002939 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002940
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002941 rb_init_page(bpage);
2942
Steven Rostedt044fa782008-12-02 23:50:03 -05002943 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002944}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04002945EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002946
2947/**
2948 * ring_buffer_free_read_page - free an allocated read page
2949 * @buffer: the buffer the page was allocated for
2950 * @data: the page to free
2951 *
2952 * Free a page allocated from ring_buffer_alloc_read_page.
2953 */
2954void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2955{
2956 free_page((unsigned long)data);
2957}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04002958EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002959
2960/**
2961 * ring_buffer_read_page - extract a page from the ring buffer
2962 * @buffer: buffer to extract from
2963 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002964 * @len: amount to extract
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002965 * @cpu: the cpu of the buffer to extract
2966 * @full: should the extraction only happen when the page is full.
2967 *
2968 * This function will pull out a page from the ring buffer and consume it.
2969 * @data_page must be the address of the variable that was returned
2970 * from ring_buffer_alloc_read_page. This is because the page might be used
2971 * to swap with a page in the ring buffer.
2972 *
2973 * for example:
Lai Jiangshanb85fa012009-02-09 14:21:14 +08002974 * rpage = ring_buffer_alloc_read_page(buffer);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002975 * if (!rpage)
2976 * return error;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002977 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002978 * if (ret >= 0)
2979 * process_page(rpage, ret);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002980 *
2981 * When @full is set, the function will not return any data unless
2982 * the writer is completely off the reader page.
2983 *
2984 * Note: it is up to the calling functions to handle sleeps and wakeups.
2985 * The ring buffer can be used anywhere in the kernel and can not
2986 * blindly call wake_up. The layer that uses the ring buffer must be
2987 * responsible for that.
2988 *
2989 * Returns:
Lai Jiangshan667d2412009-02-09 14:21:17 +08002990 * >=0 if data has been transferred, returns the offset of consumed data.
2991 * <0 if no data has been transferred.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002992 */
2993int ring_buffer_read_page(struct ring_buffer *buffer,
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002994 void **data_page, size_t len, int cpu, int full)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002995{
2996 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2997 struct ring_buffer_event *event;
Steven Rostedt044fa782008-12-02 23:50:03 -05002998 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002999 struct buffer_page *reader;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003000 unsigned long flags;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003001 unsigned int commit;
Lai Jiangshan667d2412009-02-09 14:21:17 +08003002 unsigned int read;
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003003 u64 save_timestamp;
Lai Jiangshan667d2412009-02-09 14:21:17 +08003004 int ret = -1;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003005
Steven Rostedt554f7862009-03-11 22:00:13 -04003006 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3007 goto out;
3008
Steven Rostedt474d32b2009-03-03 19:51:40 -05003009 /*
3010 * If len is not big enough to hold the page header, then
3011 * we can not copy anything.
3012 */
3013 if (len <= BUF_PAGE_HDR_SIZE)
Steven Rostedt554f7862009-03-11 22:00:13 -04003014 goto out;
Steven Rostedt474d32b2009-03-03 19:51:40 -05003015
3016 len -= BUF_PAGE_HDR_SIZE;
3017
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003018 if (!data_page)
Steven Rostedt554f7862009-03-11 22:00:13 -04003019 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003020
Steven Rostedt044fa782008-12-02 23:50:03 -05003021 bpage = *data_page;
3022 if (!bpage)
Steven Rostedt554f7862009-03-11 22:00:13 -04003023 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003024
3025 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3026
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003027 reader = rb_get_reader_page(cpu_buffer);
3028 if (!reader)
Steven Rostedt554f7862009-03-11 22:00:13 -04003029 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003030
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003031 event = rb_reader_event(cpu_buffer);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003032
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003033 read = reader->read;
3034 commit = rb_page_commit(reader);
3035
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003036 /*
Steven Rostedt474d32b2009-03-03 19:51:40 -05003037 * If this page has been partially read or
3038 * if len is not big enough to read the rest of the page or
3039 * a writer is still on the page, then
3040 * we must copy the data from the page to the buffer.
3041 * Otherwise, we can simply swap the page with the one passed in.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003042 */
Steven Rostedt474d32b2009-03-03 19:51:40 -05003043 if (read || (len < (commit - read)) ||
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003044 cpu_buffer->reader_page == cpu_buffer->commit_page) {
Lai Jiangshan667d2412009-02-09 14:21:17 +08003045 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
Steven Rostedt474d32b2009-03-03 19:51:40 -05003046 unsigned int rpos = read;
3047 unsigned int pos = 0;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003048 unsigned int size;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003049
3050 if (full)
Steven Rostedt554f7862009-03-11 22:00:13 -04003051 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003052
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003053 if (len > (commit - read))
3054 len = (commit - read);
3055
3056 size = rb_event_length(event);
3057
3058 if (len < size)
Steven Rostedt554f7862009-03-11 22:00:13 -04003059 goto out_unlock;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003060
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003061 /* save the current timestamp, since the user will need it */
3062 save_timestamp = cpu_buffer->read_stamp;
3063
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003064 /* Need to copy one event at a time */
3065 do {
Steven Rostedt474d32b2009-03-03 19:51:40 -05003066 memcpy(bpage->data + pos, rpage->data + rpos, size);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003067
3068 len -= size;
3069
3070 rb_advance_reader(cpu_buffer);
Steven Rostedt474d32b2009-03-03 19:51:40 -05003071 rpos = reader->read;
3072 pos += size;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003073
3074 event = rb_reader_event(cpu_buffer);
3075 size = rb_event_length(event);
3076 } while (len > size);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003077
3078 /* update bpage */
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003079 local_set(&bpage->commit, pos);
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003080 bpage->time_stamp = save_timestamp;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003081
Steven Rostedt474d32b2009-03-03 19:51:40 -05003082 /* we copied everything to the beginning */
3083 read = 0;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003084 } else {
Steven Rostedtafbab762009-05-01 19:40:05 -04003085 /* update the entry counter */
3086 cpu_buffer->read += local_read(&reader->entries);
3087
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003088 /* swap the pages */
Steven Rostedt044fa782008-12-02 23:50:03 -05003089 rb_init_page(bpage);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003090 bpage = reader->page;
3091 reader->page = *data_page;
3092 local_set(&reader->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04003093 local_set(&reader->entries, 0);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003094 reader->read = 0;
Steven Rostedt044fa782008-12-02 23:50:03 -05003095 *data_page = bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003096 }
Lai Jiangshan667d2412009-02-09 14:21:17 +08003097 ret = read;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003098
Steven Rostedt554f7862009-03-11 22:00:13 -04003099 out_unlock:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003100 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3101
Steven Rostedt554f7862009-03-11 22:00:13 -04003102 out:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003103 return ret;
3104}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04003105EXPORT_SYMBOL_GPL(ring_buffer_read_page);
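
/*
 * A fuller version of the example in the kernel-doc above (illustration
 * only, not part of this file; read_cpu_pages() and process_page() are
 * hypothetical).  Passing PAGE_SIZE as @len lets whole pages be swapped
 * out whenever the reader page has not been partially consumed.
 *
 *	static void read_cpu_pages(struct ring_buffer *buffer, int cpu)
 *	{
 *		void *page;
 *		int ret;
 *
 *		page = ring_buffer_alloc_read_page(buffer);
 *		if (!page)
 *			return;
 *
 *		do {
 *			ret = ring_buffer_read_page(buffer, &page,
 *						    PAGE_SIZE, cpu, 0);
 *			if (ret >= 0)
 *				process_page(page, ret);
 *		} while (ret >= 0);
 *
 *		ring_buffer_free_read_page(buffer, page);
 *	}
 */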
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003106
Steven Rostedta3583242008-11-11 15:01:42 -05003107static ssize_t
3108rb_simple_read(struct file *filp, char __user *ubuf,
3109 size_t cnt, loff_t *ppos)
3110{
Hannes Eder5e398412009-02-10 19:44:34 +01003111 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05003112 char buf[64];
3113 int r;
3114
Steven Rostedt033601a2008-11-21 12:41:55 -05003115 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3116 r = sprintf(buf, "permanently disabled\n");
3117 else
3118 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
Steven Rostedta3583242008-11-11 15:01:42 -05003119
3120 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3121}
3122
3123static ssize_t
3124rb_simple_write(struct file *filp, const char __user *ubuf,
3125 size_t cnt, loff_t *ppos)
3126{
Hannes Eder5e398412009-02-10 19:44:34 +01003127 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05003128 char buf[64];
Hannes Eder5e398412009-02-10 19:44:34 +01003129 unsigned long val;
Steven Rostedta3583242008-11-11 15:01:42 -05003130 int ret;
3131
3132 if (cnt >= sizeof(buf))
3133 return -EINVAL;
3134
3135 if (copy_from_user(&buf, ubuf, cnt))
3136 return -EFAULT;
3137
3138 buf[cnt] = 0;
3139
3140 ret = strict_strtoul(buf, 10, &val);
3141 if (ret < 0)
3142 return ret;
3143
Steven Rostedt033601a2008-11-21 12:41:55 -05003144 if (val)
3145 set_bit(RB_BUFFERS_ON_BIT, p);
3146 else
3147 clear_bit(RB_BUFFERS_ON_BIT, p);
Steven Rostedta3583242008-11-11 15:01:42 -05003148
3149 (*ppos)++;
3150
3151 return cnt;
3152}
3153
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003154static const struct file_operations rb_simple_fops = {
Steven Rostedta3583242008-11-11 15:01:42 -05003155 .open = tracing_open_generic,
3156 .read = rb_simple_read,
3157 .write = rb_simple_write,
3158};
3159
3160
3161static __init int rb_init_debugfs(void)
3162{
3163 struct dentry *d_tracer;
Steven Rostedta3583242008-11-11 15:01:42 -05003164
3165 d_tracer = tracing_init_dentry();
3166
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003167 trace_create_file("tracing_on", 0644, d_tracer,
3168 &ring_buffer_flags, &rb_simple_fops);
Steven Rostedta3583242008-11-11 15:01:42 -05003169
3170 return 0;
3171}
3172
3173fs_initcall(rb_init_debugfs);
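
/*
 * User-space sketch of toggling the ring buffers through the debugfs
 * file registered above (illustration only, not part of this file;
 * set_buffers_on() is a made-up name and the path assumes debugfs is
 * mounted at /sys/kernel/debug).  Writing "0" clears RB_BUFFERS_ON_BIT
 * so ring_buffer_lock_reserve() refuses new events; writing "1"
 * re-enables recording.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int set_buffers_on(int on)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, on ? "1" : "0", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */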
Steven Rostedt554f7862009-03-11 22:00:13 -04003174
Steven Rostedt59222ef2009-03-12 11:46:03 -04003175#ifdef CONFIG_HOTPLUG_CPU
Frederic Weisbecker09c9e842009-03-21 04:33:36 +01003176static int rb_cpu_notify(struct notifier_block *self,
3177 unsigned long action, void *hcpu)
Steven Rostedt554f7862009-03-11 22:00:13 -04003178{
3179 struct ring_buffer *buffer =
3180 container_of(self, struct ring_buffer, cpu_notify);
3181 long cpu = (long)hcpu;
3182
3183 switch (action) {
3184 case CPU_UP_PREPARE:
3185 case CPU_UP_PREPARE_FROZEN:
Rusty Russell3f237a72009-06-12 21:15:30 +09303186 if (cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04003187 return NOTIFY_OK;
3188
3189 buffer->buffers[cpu] =
3190 rb_allocate_cpu_buffer(buffer, cpu);
3191 if (!buffer->buffers[cpu]) {
3192 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3193 cpu);
3194 return NOTIFY_OK;
3195 }
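		/* make the new buffer visible before setting its bit in the cpumask */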
3196 smp_wmb();
Rusty Russell3f237a72009-06-12 21:15:30 +09303197 cpumask_set_cpu(cpu, buffer->cpumask);
Steven Rostedt554f7862009-03-11 22:00:13 -04003198 break;
3199 case CPU_DOWN_PREPARE:
3200 case CPU_DOWN_PREPARE_FROZEN:
3201 /*
3202 * Do nothing.
3203 * If we were to free the buffer, then the user would
3204 * lose any trace that was in the buffer.
3205 */
3206 break;
3207 default:
3208 break;
3209 }
3210 return NOTIFY_OK;
3211}
3212#endif