/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea;
 * Mathieu Desnoyers, for suggesting postponing the modifications;
 * and Arjan van de Ven, for keeping me straight and explaining to me
 * the dangers of modifying code while it is running.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
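
/*
 * Worked example (values made up): for a call site at ip = 0xc0100000
 * redirected to addr = 0xc0200000, the offset is
 * 0xc0200000 - (0xc0100000 + MCOUNT_INSN_SIZE) = 0x000ffffb, so the
 * five bytes emitted are e8 fb ff 0f 00 (a near call with a rel32
 * operand).
 */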

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is that it does not matter if more than one CPU is
 * writing, as long as each writes the same content to the code
 * location. Also, if a CPU is executing code it is OK to write to
 * that code location if the contents being written are the same as
 * what exists. (See the interleaving sketch below.)
 */

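/*
 * Illustrative interleaving (a sketch, not from the original source):
 * CPU 0 runs do_ftrace_mod_code() while CPU 1 takes an NMI:
 *
 *	CPU 0				CPU 1 (NMI)
 *	mod_code_ip/newcode set
 *	smp_wmb()
 *	mod_code_write = 1
 *	smp_mb()			ftrace_nmi_enter()
 *	wait_for_nmi() spins		  sees mod_code_write set
 *					  ftrace_mod_code()
 *	ftrace_mod_code()		ftrace_nmi_exit()
 *	mod_code_write = 0
 *	wait_for_nmi()
 *
 * Both writers store identical bytes, so the duplicate write is
 * harmless.
 */
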
static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static int mod_code_write;		/* set when NMI should do the write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 * (and to the code itself).
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		mod_code_write = 0;
}

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

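/*
 * ftrace_call is a label at the call instruction inside the mcount
 * trampoline (see the arch entry assembly); patching that single site
 * changes the function that every traced call is routed to.
 */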
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}
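
/*
 * Illustrative encoding (addresses made up): the instruction at the
 * ftrace_graph_call site is a 5 byte near jump, e9 <rel32>, where
 * rel32 is the target minus (ip + MCOUNT_INSN_SIZE). The two helpers
 * below only rewrite the rel32 operand, swapping the target between
 * ftrace_stub and ftrace_graph_caller.
 */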

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

#else /* CONFIG_DYNAMIC_FTRACE */

/*
 * These functions are picked from those used elsewhere in this file
 * for dynamic ftrace. They have been simplified to ignore all traces
 * in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
	atomic_dec(&in_nmi);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/* Add a function return address to the trace stack on the current task. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}
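
/*
 * Illustrative layout (indices made up): after two nested traced
 * calls, foo() -> bar(), the stack pushed above looks like:
 *
 *	current->curr_ret_stack == 1
 *	ret_stack[1] = { .ret = <return addr in foo>, .func = bar, ... }
 *	ret_stack[0] = { .ret = <return addr in foo's caller>, .func = foo, ... }
 *
 * pop_return_trace() below unwinds it one entry per function return.
 */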

/* Retrieve a function return address from the trace stack on the current task. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/* NMIs are currently unsupported */
	if (unlikely(atomic_read(&in_nmi)))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime,
				self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */