/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
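/*
 * Worked through on the listing above: the call site's literal lives at
 * ip = 0x8c01106c and currently holds 0x8c1d2468, the address being
 * called (the two .words are its little-endian halves).  Calling
 * ftrace_nop_replace(0x8c01106c) writes ip + MCOUNT_INSN_SIZE, i.e.
 * 0x8c011070, into ftrace_nop; once that word is patched in, the mov.l
 * at the top of a() loads the address of the lds.l instruction and the
 * jmp lands there, branching straight past the tracer call.
 */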
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

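/*
 * ftrace_nop_replace() and ftrace_call_replace() deliberately fill two
 * separate static buffers, so the callers below can hold the expected
 * "old" image and the desired "new" image at the same time and hand
 * both to ftrace_modify_code().
 */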
static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, so we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

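	/*
	 * probe_kernel_write() only updates memory through the data side;
	 * the CPU may still hold the old opcodes in its instruction cache,
	 * so flush the patched range before anything executes it.
	 */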
	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

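/*
 * Redirect the tracer invoked from the mcount stub.  ftrace_call is a
 * label in the assembly mcount code; ip (the label plus
 * MCOUNT_INSN_OFFSET) is taken to be the literal word holding the
 * current tracer's address, which is swapped for func via the usual
 * read/verify/write sequence.
 */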
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

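/*
 * ftrace_make_nop() and ftrace_make_call() are the hooks the generic
 * dynamic ftrace code uses to disable and re-enable tracing of a single
 * call site: each builds the expected current image and the desired
 * replacement, then lets ftrace_modify_code() do the patching.
 */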
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	__raw_writel(0, (unsigned long)data);

	return 0;
}

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;

static struct syscall_metadata **syscalls_metadata;

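/*
 * Map a syscall handler's address back to its struct syscall_metadata:
 * resolve the address to a symbol name with kallsyms, then do a linear
 * scan of the metadata section for a matching entry.
 */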
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name, str))
			return start;
	}

	return NULL;
}

#define FTRACE_SYSCALL_MAX	(NR_syscalls - 1)

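/*
 * Bounds-checked lookup into the table built by
 * arch_init_ftrace_syscalls() below; returns NULL for syscall numbers
 * that are out of range or have no metadata recorded.
 */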
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

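/*
 * Build the syscall-number -> metadata table exactly once.  The static
 * atomic reference count keeps any later caller from re-allocating the
 * table; such a caller bails out through the "end" label and undoes its
 * increment.
 */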
void arch_init_ftrace_syscalls(void)
{
	int i;
	struct syscall_metadata *meta;
	unsigned long **psys_syscall_table = &sys_call_table;
	static atomic_t refs;

	if (atomic_inc_return(&refs) != 1)
		goto end;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					FTRACE_SYSCALL_MAX, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
		meta = find_syscall_meta(psys_syscall_table[i]);
		syscalls_metadata[i] = meta;
	}
	return;

	/* Paranoid: avoid overflow */
end:
	atomic_dec(&refs);
}
#endif /* CONFIG_FTRACE_SYSCALLS */