/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access is protected by spin locking. Thus, writing
 * to the buffer by the NMI handler (x86) can also occur during
 * critical sections while the buffer is being read. To avoid this,
 * there are 2 buffers for independent read and write access. Read
 * access is in process context only, write access only in the NMI
 * handler. If the read buffer runs empty, both buffers are swapped
 * atomically. There is potentially a small window during swapping
 * where the buffers are disabled and samples could be lost.
 *
 * Using 2 buffers is a little bit of overhead, but the solution is
 * clear and does not require changes in the ring buffer
 * implementation. It can be changed to a single buffer solution
 * when the ring buffer access is implemented as non-locking atomic
 * code.
 */
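/*
 * Rough per-CPU data flow, as implemented below:
 *
 *   NMI handler --> op_cpu_buffer_write_entry()/_commit()
 *                     --> op_ring_buffer_write
 *                     (swapped in when the read side runs empty)
 *                     --> op_ring_buffer_read
 *                     --> op_cpu_buffer_read_entry() --> sync_buffer()
 */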
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

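/*
 * Account one lost sample on the current CPU. Callers are presumably
 * arch handlers that had to drop a sample before it reached the
 * buffer.
 */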
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

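/*
 * Free both ring buffers. Safe to call even if allocation failed
 * part-way: the pointers are NULL-checked and reset.
 */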
void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

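/*
 * Allocate the read and write ring buffers and reset the per-CPU
 * state. On any failure everything allocated so far is torn down
 * again via free_cpu_buffers().
 */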
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

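/*
 * Start the periodic sync work on every online CPU, staggered so the
 * flushes don't all fire in the same jiffy.
 */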
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

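/*
 * Stop the sync work: clear work_enabled so wq_sync_buffer() stops
 * re-arming itself, cancel what is pending and wait for in-flight
 * work to finish.
 */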
void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

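/*
 * Reserve space for one op_sample in the write buffer. On success
 * entry->sample points at the reserved slot; the caller fills it in
 * and then calls op_cpu_buffer_write_commit().
 */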
int op_cpu_buffer_write_entry(struct op_entry *entry)
{
	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
						sizeof(struct op_sample),
						&entry->irq_flags);
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return -ENOMEM;

	return 0;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}

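/*
 * Consume one sample from the read buffer. If it is empty, atomically
 * swap in the write buffer and try once more; NULL means both sides
 * are empty or the swap failed.
 */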
struct op_sample *op_cpu_buffer_read_entry(int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	return NULL;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

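/*
 * Write one pc/event pair: reserve a slot, fill it and commit it in a
 * single step. Returns nonzero if the reserve or the commit fails.
 */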
static inline int
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	ret = op_cpu_buffer_write_entry(&entry);
	if (ret)
		return ret;

	entry.sample->eip = pc;
	entry.sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

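/*
 * Non-sample records (task switches, kernel/user transitions, trace
 * markers) are encoded as an ESCAPE_CODE "pc" whose event field
 * carries the real value.
 */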
static inline int
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	return add_sample(buffer, ESCAPE_CODE, value);
}

/*
 * This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		if (add_code(cpu_buf, is_kernel))
			goto fail;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		if (add_code(cpu_buf, (unsigned long)task))
			goto fail;
	}

	if (add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

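/*
 * Begin/end a callchain: CPU_TRACE_BEGIN marks the start in the
 * buffer, and while cpu_buf->tracing is set, oprofile_add_trace()
 * appends one entry per frame.
 */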
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

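/*
 * Log a sample and, if a backtrace depth is configured, follow it
 * with the callchain gathered by the architecture's backtrace
 * callback.
 */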
static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!oprofile_backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	oprofile_begin_trace(cpu_buf);

	/*
	 * If log_sample() fails, we can't backtrace, since we lost
	 * the source of this event.
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);

	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14

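/*
 * An IBS record is written as an escape code carrying ibs_code,
 * followed by the register pairs from ibs_sample[]: three pairs for
 * a fetch sample, six for an op sample (IBS_OP_BEGIN).
 */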
void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;
	int fail = 0;

	cpu_buf->sample_received++;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		if (add_code(cpu_buf, is_kernel))
			goto fail;
		cpu_buf->last_is_kernel = is_kernel;
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			if (add_code(cpu_buf, (unsigned long)task))
				goto fail;
			cpu_buf->last_task = task;
		}
	}

	fail = fail || add_code(cpu_buf, ibs_code);
	fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (fail)
		goto fail;

	if (oprofile_backtrace_depth)
		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);

	return;

fail:
	cpu_buf->sample_lost_overflow++;
	return;
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get one.
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid CPU buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on() and then schedule_delayed_work()
 * we guarantee this will stay on the correct CPU.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}