/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

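/*
 * Layout of the 5-byte x86 call instruction built by ftrace_call_replace():
 * a one-byte 0xe8 opcode followed by a signed 32-bit offset relative to
 * the next instruction.
 */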
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};


static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

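/*
 * Build the 5-byte "call addr" instruction for the call site at ip.
 * The offset is computed from ip + MCOUNT_INSN_SIZE because x86 relative
 * calls are relative to the instruction that follows the call.
 */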
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */

static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static int mod_code_write;		/* set when NMI should do the write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

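/* Report the NMI wait/update counters so the core tracer can expose them. */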
int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

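/*
 * Perform the actual text write. This may run on the CPU doing the
 * modification or from an NMI handler that raced with it.
 */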
static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 * (and the code itself).
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

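/*
 * On NMI entry, if a modification is pending, do the write here so the
 * NMI does not run over code that another CPU is in the middle of
 * modifying.
 */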
void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

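/* Spin until no CPU is executing an NMI handler, counting the waits. */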
static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}

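/*
 * Steps 1-6 of the protocol described above: publish the buffers, set the
 * write flag, let any in-flight NMIs drain, do the write, then clear the
 * flag and drain NMIs again.
 */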
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}

int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change; we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

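/* Patch the call at the ftrace_call site to call the given tracer function. */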
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

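/*
 * Runs once at boot to pick the nop sequence used when patching out
 * mcount calls, probing which nop this CPU can actually execute.
 */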
int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}