#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef __ASSEMBLY__

	/* skip is set if the stack was already partially adjusted */
	.macro MCOUNT_SAVE_FRAME skip=0
	/*
	 * We add enough stack to save all regs.
	 */
	subq $(SS+8-\skip), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/* Move RIP to its proper location */
	movq SS+8(%rsp), %rdx
	movq %rdx, RIP(%rsp)
	.endm

	.macro MCOUNT_RESTORE_FRAME skip=0
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	addq $(SS+8-\skip), %rsp
	.endm
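
	/*
	 * Illustrative sketch only (not part of the build): a tracing
	 * trampoline brackets its work with the two macros above.  The
	 * real implementations live in entry_64.S; roughly, the shape is:
	 *
	 *	MCOUNT_SAVE_FRAME
	 *	movq RIP(%rsp), %rdi		# hypothetical: ip of the call site
	 *	subq $MCOUNT_INSN_SIZE, %rdi
	 *	call ftrace_stub		# or the registered tracer
	 *	MCOUNT_RESTORE_FRAME
	 *	retq
	 */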

#endif

#ifdef CONFIG_FUNCTION_TRACER
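/*
 * CC_USING_FENTRY is defined by the kernel build when the compiler supports
 * -mfentry: the hook is then a "call __fentry__" placed at the very start of
 * each function, before the stack frame is set up.  Otherwise gcc emits the
 * traditional "call mcount" after the prologue.
 */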
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR		((long)(__fentry__))
#else
# define MCOUNT_ADDR		((long)(mcount))
#endif
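/*
 * The hook call site is a single 5-byte instruction: the 0xe8 call opcode
 * followed by a 32-bit relative displacement.  Dynamic ftrace rewrites those
 * same 5 bytes between a call and a NOP at runtime.
 */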
#define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
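/*
 * mcount and __fentry__ are the assembly entry points that the
 * compiler-generated hook calls land on.  modifying_ftrace_code is non-zero
 * while ftrace is rewriting call sites, so the int3 handler knows that a
 * stray breakpoint may be one of ours.
 */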
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

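/*
 * Call sites are patched live with a breakpoint: ftrace first writes an int3
 * over the first byte of the 5-byte instruction, syncs all CPUs, updates the
 * remaining bytes, then replaces the int3 with the new first byte.  Any CPU
 * that hits the site mid-update lands in ftrace_int3_handler(), which simply
 * steps the faulting ip past the instruction being modified.
 */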
int ftrace_int3_handler(struct pt_regs *regs);

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */


#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscall numbers do not map to the x86_64 syscall numbers,
 * tracing an ia32 task would garble the trace output.  Instead of reporting
 * bogus syscalls, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
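/*
 * With ARCH_TRACE_IGNORE_COMPAT_SYSCALLS set, the generic syscall tracing
 * code (kernel/trace/trace_syscalls.c) is expected to call
 * arch_trace_is_compat_syscall() and skip the event when it returns true.
 */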
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		return true;
	return false;
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */

#endif /* _ASM_X86_FTRACE_H */