/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

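/*
 * The symbols below are labels defined in the inline assembly templates
 * further down. They are declared as functions only so that their addresses
 * can be taken; the patching code copies their byte patterns around.
 */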
void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE	4

#ifdef CONFIG_64BIT

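/*
 * Template used for the initial NOP conversion of an mcount call site:
 * the leading "j 0f" branches over the now unused load of the ftrace
 * function pointer and the basr that would call it.
 */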
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	.word	0x0024\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	lg	%r14,8(15)\n"
	"	lgr	%r0,%r0\n"
	"0:\n");

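/* A four byte relative branch over the remaining mcount instructions. */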
asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

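/*
 * First instruction of the original mcount sequence; writing it back
 * over the branch re-enables the call into the ftrace code.
 */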
asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	stg	%r14,8(%r15)\n");

#else /* CONFIG_64BIT */

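/*
 * 31 bit variants of the templates above; "bcr 0,%r7" is a no-op
 * (branch with an all-zero condition mask).
 */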
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	l	%r14,4(%r15)\n"
	"	j	0f\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	st	%r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules, code can disappear and change.
	 * We need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_*
	 * functions.
	 * This however is just a simple sanity check.
	 */
	if (probe_kernel_read(replaced, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}

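/*
 * Initial conversion of an mcount call site (addr == MCOUNT_ADDR): the
 * complete call sequence is still present, so replace it with the
 * disable template in one go.
 */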
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

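/*
 * Runtime disable of a call site: only the first instruction is patched,
 * turning the store of %r14 into a branch over the whole mcount block.
 */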
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr == MCOUNT_ADDR)
		return ftrace_make_initial_nop(mod, rec, addr);
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_nop_code, FTRACE_INSN_SIZE);
}

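/* Runtime enable: put the first instruction of the mcount sequence back. */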
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_nop_code, FTRACE_INSN_SIZE,
				  ftrace_call_code, FTRACE_INSN_SIZE);
}

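/* Called by the ftrace core whenever the tracer callback changes. */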
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}

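/* Architecture setup hook; writing 0 to *data reports success to the core. */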
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition. The condition mask
 * is either all ones (always branch, i.e. disable ftrace_graph_caller) or all
 * zeroes (nop, i.e. enable ftrace_graph_caller).
 * The instruction format for brc is a7m4xxxx, where m is the condition mask.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa704;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

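/*
 * The ip passed to prepare_ftrace_return() is the return address of the
 * call into the tracing code, i.e. it points into the patched mcount block
 * right behind the basr; subtracting that offset yields the start of the
 * block, the call site within the traced function.
 */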
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	/* NMIs are currently unsupported. */
	if (unlikely(in_nmi()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
		goto out;
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */