/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU,
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code; this is done in an atomic operation.
 * 3) Write the code.
 * 4) Clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

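/*
 * Atomically clear MOD_CODE_WRITE_FLAG in nmi_running without
 * disturbing the NMI reference count held in the low bits.
 */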
static void clear_mod_flag(void)
{
	int old = atomic_read(&nmi_running);

	for (;;) {
		int new = old & ~MOD_CODE_WRITE_FLAG;

		if (old == new)
			break;

		old = atomic_cmpxchg(&nmi_running, old, new);
	}
}

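/*
 * Perform the actual text write. This may run on the CPU doing the
 * modification and, concurrently, on any CPU that takes an NMI while
 * the write flag is set.
 */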
static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU process can be writing to mod_code_status.
	 *    (and the code itself)
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		clear_mod_flag();
}

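/*
 * Called on NMI entry: bump the NMI refcount and, if a code
 * modification is pending, perform the write on this CPU too.
 */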
void ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}

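/* Called on NMI exit: drop the NMI refcount taken in ftrace_nmi_enter(). */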
void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}

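/*
 * Spin until no NMIs are in progress, then claim the write flag.
 * The cmpxchg only succeeds when nmi_running is zero, so the flag
 * is never set while an NMI is still executing.
 */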
static void wait_for_nmi_and_set_mod_flag(void)
{
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	do {
		cpu_relax();
	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));

	nmi_wait_count++;
}

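/* Spin until all in-progress NMIs have finished. */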
static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	do {
		cpu_relax();
	} while (atomic_read(&nmi_running));

	nmi_wait_count++;
}

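/*
 * Carry out steps 1-5 described above: publish the IP and new code,
 * set the write flag once no NMIs are running, do the write, then
 * clear the flag and wait for any remaining NMIs before returning.
 */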
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 *  disappear and change; we need to protect against faulting
	 *  as well as code changing. We do this by using the
	 *  probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

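/*
 * Patch the call site inside the ftrace_caller trampoline (at the
 * ftrace_call label) so that it calls the newly registered tracing
 * function.
 */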
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

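/*
 * Turn the mcount call site at rec->ip into a "nop": the literal is
 * rewritten to point just past the mcount call sequence (see the
 * comment above ftrace_nop_replace()).
 */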
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

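/* Turn the "nop" at rec->ip back into a call to addr. */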
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	__raw_writel(0, (unsigned long)data);

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

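/*
 * Swap one address for another at ip, after verifying that the old
 * address is what we expect to find there.
 */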
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

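/*
 * Redirect the ftrace_graph_call site from skip_trace() to
 * ftrace_graph_caller() when the function graph tracer is enabled,
 * and back again when it is disabled.
 */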
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	__asm__ __volatile__(
		"1:						\n\t"
		"mov.l		@%2, %0				\n\t"
		"2:						\n\t"
		"mov.l		%3, @%2				\n\t"
		"mov		#0, %1				\n\t"
		"3:						\n\t"
		".section .fixup, \"ax\"			\n\t"
		"4:						\n\t"
		"mov.l		5f, %0				\n\t"
		"jmp		@%0				\n\t"
		" mov		#1, %1				\n\t"
		".balign 4					\n\t"
		"5:	.long 3b				\n\t"
		".previous					\n\t"
		".section __ex_table,\"a\"			\n\t"
		".long 1b, 4b					\n\t"
		".long 2b, 4b					\n\t"
		".previous					\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		__raw_writel(old, parent);
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		__raw_writel(old, parent);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */