/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched lazily with NOPs until they are
 * enabled. All code mutation routines here take effect atomically.
 */

#include <linux/ftrace.h>
#include <asm/cacheflush.h>

#define INSN_SIZE	4		/* ARM instructions are 4 bytes long */
#define PC_OFFSET	8		/* PC reads as the instruction address + 8 */
#define BL_OPCODE	0xeb000000	/* unconditional BL with a zero offset */
#define BL_OFFSET_MASK	0x00ffffff	/* 24-bit signed word-offset field of BL */

static unsigned long bl_insn;
static const unsigned long NOP = 0xe1a00000;	/* mov r0, r0 */

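/*
 * Return the instruction used to patch out an mcount call site: a
 * "mov r0, r0" NOP, the same size as the original call.
 */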
unsigned char *ftrace_nop_replace(void)
{
	return (unsigned char *)&NOP;
}

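/*
 * A BL instruction holds a signed 24-bit word offset, taken relative to
 * the address of the branch plus PC_OFFSET (8), which limits its reach
 * to +/-32MB. Callers pass in the address just past the call site,
 * hence the pc - INSN_SIZE adjustment below.
 */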
/* construct a branch (BL) instruction to addr */
unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	long offset;

	offset = (long)addr - (long)(pc - INSN_SIZE + PC_OFFSET);
	if (unlikely(offset < -33554432 || offset > 33554428)) {
		/* Can't generate branches that far (from ARM ARM). Ftrace
		 * doesn't generate branches outside of core kernel text.
		 */
		WARN_ON_ONCE(1);
		return NULL;
	}
	offset = (offset >> 2) & BL_OFFSET_MASK;
	bl_insn = BL_OPCODE | offset;
	return (unsigned char *)&bl_insn;
}

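/*
 * Atomically replace the instruction at the call site with new_code,
 * provided it still contains old_code. pc is the address just past the
 * instruction to patch. A fault on the read or write is caught by the
 * exception-table fixup below.
 *
 * Returns 0 on success (or if new_code is already in place), 1 if the
 * access faulted, and 2 if the instruction found there matched neither
 * old_code nor new_code.
 */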
int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned long err = 0, replaced = 0, old, new;

	old = *(unsigned long *)old_code;
	new = *(unsigned long *)new_code;
	pc -= INSN_SIZE;

	__asm__ __volatile__ (
		"1:  ldr    %1, [%2]	\n"
		"    cmp    %1, %4	\n"
		"2:  streq  %3, [%2]	\n"
		"    cmpne  %1, %3	\n"
		"    movne  %0, #2	\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4:  mov  %0, #1	\n"
		"    b    3b		\n"
		".previous\n"

		".section __ex_table, \"a\"\n"
		"    .long 1b, 4b	\n"
		"    .long 2b, 4b	\n"
		".previous\n"

		: "=r"(err), "=r"(replaced)
		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
		: "memory");

	if (!err && (replaced == old))
		flush_icache_range(pc, pc + INSN_SIZE);

	return err;
}

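/*
 * Patch the call instruction at the ftrace_call site so that the
 * tracer entry code branches to func.
 */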
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret;
	unsigned long pc, old;
	unsigned char *new;

	pc = (unsigned long)&ftrace_call;
	pc += INSN_SIZE;	/* address just past the call instruction */
	memcpy(&old, &ftrace_call, INSN_SIZE);
	new = ftrace_call_replace(pc, (unsigned long)func);
	ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
	return ret;
}

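/*
 * Point the call at the mcount_call site at the address passed in
 * *data, and hand back the ftrace_modify_code() result through *data.
 */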
int ftrace_mcount_set(unsigned long *data)
{
	unsigned long pc, old;
	unsigned long *addr = data;
	unsigned char *new;

	pc = (unsigned long)&mcount_call;
	pc += INSN_SIZE;
	memcpy(&old, &mcount_call, INSN_SIZE);
	new = ftrace_call_replace(pc, *addr);
	*addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
	return 0;
}

/* run from kstop_machine */
int __init ftrace_dyn_arch_init(void *data)
{
	ftrace_mcount_set(data);
	return 0;
}