/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying running code.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};
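
/*
 * The patched call site is a 5-byte x86 near call: "e8" is the call
 * opcode and "offset" is a 32-bit displacement relative to the next
 * instruction. ftrace_call_replace() below builds exactly this byte
 * sequence, with offset = addr - (ip + MCOUNT_INSN_SIZE).
 */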

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed; this must be called via kstop_machine,
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
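
/*
 * Informally, the barrier pairing below behaves like a store-buffer
 * test: the modifying CPU sets mod_code_write and then, after a full
 * barrier, checks nmi_running, while an NMI increments nmi_running and
 * then, also after a full barrier, checks mod_code_write. At least one
 * side must observe the other's store, so either the modifier waits for
 * the NMI to finish, or the NMI performs the write itself from the
 * already-visible buffers.
 */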

static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;             /* holds return value of text write */
static int mod_code_write;              /* set when NMI should do the write */
static void *mod_code_ip;               /* holds the IP to write to */
static void *mod_code_newcode;          /* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
        int r;

        r = snprintf(buf, size, "%u %u",
                     nmi_wait_count,
                     atomic_read(&nmi_update_count));
        return r;
}

static void ftrace_mod_code(void)
{
        /*
         * Yes, more than one CPU can be writing to mod_code_status
         * (and to the code itself).
         * But if one were to fail, then they all should, and if one were
         * to succeed, then they all should.
         */
        mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
                                             MCOUNT_INSN_SIZE);
}

void arch_ftrace_nmi_enter(void)
{
        atomic_inc(&nmi_running);
        /* Must have nmi_running seen before reading write flag */
        smp_mb();
        if (mod_code_write) {
                ftrace_mod_code();
                atomic_inc(&nmi_update_count);
        }
}

void arch_ftrace_nmi_exit(void)
{
        /* Finish all executions before clearing nmi_running */
        smp_wmb();
        atomic_dec(&nmi_running);
}

static void wait_for_nmi(void)
{
        if (!atomic_read(&nmi_running))
                return;

        do {
                cpu_relax();
        } while (atomic_read(&nmi_running));

        nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
        mod_code_ip = (void *)ip;
        mod_code_newcode = new_code;

        /* The buffers need to be visible before we let NMIs write them */
        smp_wmb();

        mod_code_write = 1;

        /* Make sure write bit is visible before we wait on NMIs */
        smp_mb();

        wait_for_nmi();

        /* Make sure all running NMIs have finished before we write the code */
        smp_mb();

        ftrace_mod_code();

        /* Make sure the write happens before clearing the bit */
        smp_wmb();

        mod_code_write = 0;

        /* make sure NMIs see the cleared bit */
        smp_mb();

        wait_for_nmi();

        return mod_code_status;
}
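/* The 5-byte nop actually used; selected at boot by ftrace_dyn_arch_init(). */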
static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
        return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules and __init, code can disappear and
         * change; we need to protect against faulting as well as code
         * changing. We do this by using the probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        sync_core();

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        return ftrace_modify_code(rec->ip, old, new);
}
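
/*
 * The generic ftrace code drives the two helpers above in opposite
 * directions: ftrace_make_nop() turns a compiler-generated mcount call
 * into the selected nop (e.g. at boot, or when tracing of that site is
 * disabled), and ftrace_make_call() puts a call to the given tracer
 * address back in its place. Both verify the expected old bytes first
 * via ftrace_modify_code().
 */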

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char old[MCOUNT_INSN_SIZE], *new;
        int ret;

        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
        extern const unsigned char ftrace_test_p6nop[];
        extern const unsigned char ftrace_test_nop5[];
        extern const unsigned char ftrace_test_jmp[];
        int faulted = 0;

        /*
         * There is no good nop for all x86 archs.
         * We will default to using the P6_NOP5, but first we
         * will test to make sure that the nop will actually
         * work on this CPU. If it faults, we will then
         * go to a less efficient 5 byte nop. If that fails
         * we then just use a jmp as our nop. This isn't the most
         * efficient nop, but we cannot use a multi-part nop
         * since we would then risk being preempted in the middle
         * of that nop, and if we enabled tracing then, it might
         * cause a system crash.
         *
         * TODO: check the cpuid to determine the best nop.
         */
        asm volatile (
                "ftrace_test_jmp:"
                "jmp ftrace_test_p6nop\n"
                "nop\n"
                "nop\n"
                "nop\n"  /* 2 byte jmp + 3 bytes */
                "ftrace_test_p6nop:"
                P6_NOP5
                "jmp 1f\n"
                "ftrace_test_nop5:"
                ".byte 0x66,0x66,0x66,0x66,0x90\n"
                "1:"
                ".section .fixup, \"ax\"\n"
                "2:     movl $1, %0\n"
                "       jmp ftrace_test_nop5\n"
                "3:     movl $2, %0\n"
                "       jmp 1b\n"
                ".previous\n"
                _ASM_EXTABLE(ftrace_test_p6nop, 2b)
                _ASM_EXTABLE(ftrace_test_nop5, 3b)
                : "=r"(faulted) : "0" (faulted));

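        /*
         * faulted == 0: the P6 nop (0f 1f 44 00 00) executed fine.
         * faulted == 1: it trapped; fall back to the prefixed 5-byte nop
         *               (66 66 66 66 90).
         * faulted == 2: that trapped too; use a short jmp over the
         *               remaining bytes ("jmp . + 5") as the nop.
         */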
        switch (faulted) {
        case 0:
                pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
                memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
                break;
        case 1:
                pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
                memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
                break;
        case 2:
                pr_info("ftrace: converting mcount calls to jmp . + 5\n");
                memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
                break;
        }

        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
                          int old_offset, int new_offset)
{
        unsigned char code[MCOUNT_INSN_SIZE];

        if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
                return -EINVAL;

        *(int *)(&code[1]) = new_offset;

        if (do_ftrace_mod_code(ip, &code))
                return -EPERM;

        return 0;
}
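
/*
 * ftrace_graph_call is a 5-byte near jmp ("e9" followed by a 32-bit
 * displacement relative to the next instruction). Enabling the graph
 * caller rewrites that displacement from ftrace_stub to
 * ftrace_graph_caller; disabling does the reverse. ftrace_mod_jmp()
 * only touches the displacement, after verifying the old one.
 */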

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

#else /* CONFIG_DYNAMIC_FTRACE */

/*
 * These are simplified versions of the functions used above for dynamic
 * ftrace; all they do is track whether we are currently in NMI context,
 * so that traces from NMIs can be ignored.
 */
static atomic_t nmi_running;

void arch_ftrace_nmi_enter(void)
{
        atomic_inc(&nmi_running);
}

void arch_ftrace_nmi_exit(void)
{
        atomic_dec(&nmi_running);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/* Add a function return address to the trace stack of the current task. */
static int push_return_trace(unsigned long ret, unsigned long long time,
                             unsigned long func, int *depth)
{
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = time;
        *depth = index;

        return 0;
}

/* Retrieve a function return address from the trace stack of the current task. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
        /* Make sure the entry is fully read before the slot can be reused */
        barrier();
        current->curr_ret_stack--;
}

/*
 * Called by the return_to_handler trampoline: send the trace to the
 * ring-buffer and hand back the original return address so the
 * trampoline can jump to it.
 */
unsigned long ftrace_return_to_handler(void)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        pop_return_trace(&trace, &ret);
        trace.rettime = cpu_clock(raw_smp_processor_id());
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current task.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
        unsigned long old;
        unsigned long long calltime;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        /* NMIs are currently unsupported */
        if (unlikely(atomic_read(&nmi_running)))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against a fault, even if it shouldn't happen. This
         * tool is too intrusive to ignore such a protection. Read the
         * original return address at *parent into "old" and replace it
         * with return_hooker; if either access faults, the fixup below
         * sets "faulted".
         */
        asm volatile(
                "1: " _ASM_MOV " (%[parent_old]), %[old]\n"
                "2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
                "   movl $0, %[faulted]\n"

                ".section .fixup, \"ax\"\n"
                "3: movl $1, %[faulted]\n"
                ".previous\n"

                _ASM_EXTABLE(1b, 3b)
                _ASM_EXTABLE(2b, 3b)

                : [parent_replaced] "=r" (parent), [old] "=r" (old),
                  [faulted] "=r" (faulted)
                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
                );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        if (unlikely(!__kernel_text_address(old))) {
                ftrace_graph_stop();
                *parent = old;
                WARN_ON(1);
                return;
        }

        calltime = cpu_clock(raw_smp_processor_id());

        if (push_return_trace(old, calltime,
                              self_addr, &trace.depth) == -EBUSY) {
                *parent = old;
                return;
        }

        trace.func = self_addr;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent = old;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */