/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t			tracer_counter;
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled;
static unsigned long		trace_nr_entries = 16384UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
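
/*
 * Rough sizing example (illustrative, not authoritative): with the common
 * 4096-byte PAGE_SIZE and a struct trace_entry of, say, 32 bytes, each
 * page would hold 128 entries. The real entry size comes from trace.h,
 * so the actual count depends on the architecture and configuration.
 */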

static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);
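
/*
 * Usage sketch (65536 is a made-up value; any count works): booting with
 * "trace_entries=65536" on the kernel command line requests at least that
 * many entries; tracer_alloc_buffers() below rounds the total up to whole
 * pages.
 */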

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF	= 0x01,
	TRACE_FLAG_NEED_RESCHED	= 0x02,
	TRACE_FLAG_HARDIRQ	= 0x04,
	TRACE_FLAG_SOFTIRQ	= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	void *save_trace;
	struct list_head save_pages;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		save_trace = max_tr.data[i]->trace;
		save_pages = max_tr.data[i]->trace_pages;
		memcpy(max_tr.data[i], data, sizeof(*data));
		data->trace = save_trace;
		data->trace_pages = save_pages;
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	void *save_trace;
	struct list_head save_pages;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	save_trace = max_tr.data[cpu]->trace;
	save_pages = max_tr.data[cpu]->trace_pages;
	memcpy(max_tr.data[cpu], data, sizeof(*data));
	data->trace = save_trace;
	data->trace_pages = save_pages;
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}
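
/*
 * Caller-context sketch (inferred from the WARN_ON_ONCE checks above, not
 * a documented contract): both variants expect interrupts to already be
 * disabled, e.g. a latency tracer might do:
 *
 *	local_irq_save(flags);
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr(tr, current, cpu);
 *	}
 *	local_irq_restore(flags);
 *
 * update_max_tr() swaps every CPU's buffer into max_tr, while
 * update_max_tr_single() swaps only @cpu and resets the rest.
 */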

int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Tracer %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}
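
/*
 * Registration sketch (illustrative only; "example" is a made-up name and
 * only fields used elsewhere in this file are shown):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_init,
 *		.reset	= example_reset,
 *	};
 *
 *	register_tracer(&example_tracer);
 *
 * Once registered, the name shows up in available_tracers and can be
 * selected through current_tracer (see tracer_init_debugfs() below).
 */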

void notrace tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_current = data->trace;
	data->trace_current_idx = 0;
}

#ifdef CONFIG_FTRACE
static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static void notrace trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/* evict the pid that previously owned this slot */
		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}
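
/*
 * Lookup sketch (the pid and comm are made up): once trace_save_cmdline()
 * has run for a task with pid 1234 and comm "bash", map_pid_to_cmdline[1234]
 * holds a slot index such that saved_cmdlines[slot] == "bash", so
 * trace_find_cmdline(1234) returns "bash". Pids whose slot was recycled
 * fall back to "<...>", and pid 0 always reads "<idle>".
 */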

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
			struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;
	struct page *page;
	struct list_head *next;

	data->trace_idx++;
	idx = data->trace_current_idx;
	idx_next = idx + 1;

	entry = data->trace_current + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		/*
		 * The current page is full; advance to the next page,
		 * wrapping back to the head of the page list if this
		 * was the last one (the buffer is a ring of pages).
		 */
		page = virt_to_page(data->trace_current);
		if (unlikely(&page->lru == data->trace_pages.prev))
			next = data->trace_pages.next;
		else
			next = page->lru.next;
		page = list_entry(next, struct page, lru);
		data->trace_current = page_address(page);
		idx_next = 0;
	}

	data->trace_current_idx = idx_next;

	return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
			     unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
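
/*
 * Worked example of the flags byte: an entry recorded from hard-irq context
 * with interrupts disabled gets TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ
 * (0x01 | 0x04 = 0x05); lat_print_generic() below renders that as "d.h"
 * (irqs-off, no resched pending, hardirq), followed by the preempt depth.
 */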

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx)
		return NULL;

	if (!iter->next_page[cpu]) {
		/*
		 * Initialize: if more entries have been written to
		 * this buffer than it can hold, the buffer wrapped
		 * around, and the oldest entry sits right at the
		 * producer's current position. Use that as the
		 * starting point; otherwise start at the first page.
		 */
		if (data->trace_idx >= tr->entries) {
			page = virt_to_page(data->trace_current);
			iter->next_page[cpu] = &page->lru;
			iter->next_page_idx[cpu] = data->trace_current_idx;
		} else {
			iter->next_page[cpu] = data->trace_pages.next;
			iter->next_page_idx[cpu] = 0;
		}
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	array = page_address(page);

	return &array[iter->next_page_idx[cpu]];
}

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!tr->data[cpu]->trace)
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}
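
/*
 * The per-CPU buffers are merged by comparing entry->idx, the global
 * counter assigned in tracing_generic_entry_update(): the candidate with
 * the smallest idx (the oldest entry) wins. The signed difference
 * (long)(next->idx - ent->idx) keeps the comparison correct even after
 * the atomic counter wraps around.
 */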

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	if (next) {
		iter->idx++;
		iter->next_idx[next_cpu]++;
		iter->next_page_idx[next_cpu]++;
		if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
			struct trace_array_cpu *data = iter->tr->data[next_cpu];

			iter->next_page_idx[next_cpu] = 0;
			iter->next_page[next_cpu] =
				iter->next_page[next_cpu]->next;
			if (iter->next_page[next_cpu] == &data->trace_pages)
				iter->next_page[next_cpu] =
					data->trace_pages.next;
		}
	}
	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}

static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *ent;
	void *last_ent = iter->ent;
	int i = (int)*pos;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	/*
	 * Disable cmdline recording before the early return below, so
	 * that the atomic_dec() in s_stop() always has a matching inc.
	 */
	atomic_inc(&trace_record_cmdline_disabled);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static void notrace
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}

static void notrace print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void notrace print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}

static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (tr->data[cpu]->trace) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   data->saved_latency,
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}
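
/*
 * Marker example (the absolute timestamp shown is made up): an entry whose
 * gap to the next entry exceeds preempt_mark_thresh (100 us) prints like
 * " 320us!:", a gap above 1 us prints "+:", and anything at or below 1 us
 * prints a plain " :".
 */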

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	default:
		seq_printf(m, "Unknown type %d\n", entry->type);
	}
}

static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry = iter->ent;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
	seq_printf(m, "\n");
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (data->trace &&
		    data->trace_idx)
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open = tracing_lt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}
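
/*
 * Usage sketch (paths assume debugfs mounted on /debugfs, as in the
 * latency_trace comment above; the sample output line is illustrative):
 *
 *	cat /debugfs/tracing/iter_ctrl
 *	  print-parent nosym-offset nosym-addr noverbose
 *	echo sym-offset > /debugfs/tracing/iter_ctrl    # set an option
 *	echo nosym-offset > /debugfs/tracing/iter_ctrl  # clear it again
 */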

static struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_iter_ctrl_read,
	.write = tracing_iter_ctrl_write,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}
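
/*
 * Usage sketch: any non-zero value enables tracing, zero disables it
 * (the !!val above normalizes the input):
 *
 *	echo 1 > /debugfs/tracing/tracing_enabled
 *	echo 0 > /debugfs/tracing/tracing_enabled
 */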

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}
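
/*
 * Usage sketch: list the registered tracers, then select one by name.
 * Only the "none" tracer is registered by this file itself; other names
 * depend on which tracers have called register_tracer().
 *
 *	cat /debugfs/tracing/available_tracers
 *	echo none > /debugfs/tracing/current_tracer
 */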

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}
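
/*
 * Unit note and usage sketch: these files read and write microseconds,
 * while the backing variables hold nanoseconds (hence the val * 1000 in
 * the write path and nsecs_to_usecs() in the read path). For example:
 *
 *	echo 100 > /debugfs/tracing/tracing_thresh     # threshold = 100 us
 *	echo 0 > /debugfs/tracing/tracing_max_latency  # reset the maximum
 */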

static struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name = "none",
};

static int trace_alloc_page(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page, *tmp;
	LIST_HEAD(pages);
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}
		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we successfully allocated a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
	return -ENOMEM;
}
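
/*
 * Bookkeeping note: pages added to the global buffer are ClearPageLRU()'d
 * and pages added to the max-latency buffer are SetPageLRU()'d. As the
 * comment in tracer_alloc_buffers() below says, the LRU flag is (ab)used
 * here purely to tell the two buffers' pages apart, e.g. after
 * update_max_tr() has swapped their page lists.
 */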

__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
	int i;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}
		data->trace = array;

		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		ClearPageLRU(page);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}
		max_tr.data[i]->trace = array;

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		SetPageLRU(page);
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		if (data && data->trace) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
			data->trace = NULL;
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data && data->trace) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
			data->trace = NULL;
		}
#endif
	}
	return -ENOMEM;
}

device_initcall(tracer_alloc_buffers);