/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them. With data registers, R3 is the
 * only one we can blow away. With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function. And since GCC set up the frame for us, the previous function
 * will be waiting there. mmmm pie.
 */
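/* In rough C terms, the code below boils down to this (an illustrative
 * sketch only; "frompc" and "selfpc" are pseudo-variables recovered from
 * the stack frame and RETS as described further down):
 *
 *	if (ftrace_trace_function != ftrace_stub)
 *		ftrace_trace_function(frompc, selfpc - MCOUNT_INSN_SIZE);
 */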
ENTRY(__mcount)
	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];
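	/* (the split .l/.h loads above are just how Blackfin builds a full
	 * 32-bit symbol address: immediate loads only carry 16 bits at a time)
	 */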

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* set up the tracer function */
	p0 = r3;

	/* tracer(ulong frompc, ulong selfpc):
	 *  frompc: the pc that did the call to ...
	 *  selfpc: ... this location
	 * the selfpc itself will need adjusting for the mcount call
	 */
	r1 = rets;
	r0 = [fp + 4];
	r1 += -MCOUNT_INSN_SIZE;
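	/* (the profiled function's LINK instruction already pushed its RETS
	 * and the old FP, so the caller's return address, i.e. frompc, sits
	 * at [fp + 4]; selfpc is our own RETS backed up over the call to
	 * __mcount by MCOUNT_INSN_SIZE)
	 */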

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

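	/* _ftrace_stub is the default no-op tracer; placing the label here
	 * lets __mcount's normal exit fall through and share the same rts
	 */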
.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)