/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *)stack = data;
	return 0;
}

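/*
 * Overflow handler for the user breakpoint registered by set_single_step()
 * below.
 */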
void ptrace_triggered(struct perf_event *bp, int nmi,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

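/*
 * Install or update the task's ptrace hardware breakpoint at @addr:
 * register a new user breakpoint on first use, otherwise retarget the
 * existing one.
 */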
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

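/*
 * Single-step is emulated with the hardware breakpoint above: enabling it
 * sets TIF_SINGLESTEP and plants a breakpoint at the task's current PC,
 * disabling it simply clears the flag.
 */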
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single-step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

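/*
 * Regset get/set handlers for the general registers: R0-R15 followed by
 * PC, PR, SR, GBR, MACH, MACL and TRA, laid out as in struct pt_regs.
 */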
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

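/*
 * FPU regset handlers. The state is copied from the hardware FPU image
 * when the CPU has an FPU, otherwise from the software emulation state.
 */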
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

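/*
 * DSP regset handlers; the regset only reports itself as active when the
 * task is actually running with SR.DSP set.
 */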
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

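/* Table of register names and their offsets within struct pt_regs. */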
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

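/* All tasks share the single native regset view defined above. */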
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

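/*
 * Architecture-specific ptrace requests: PEEKUSR/POKEUSR on the USER area
 * (pt_regs, FPU state and the text/data layout words) plus the bulk regset
 * transfers; anything else is passed on to ptrace_request().
 */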
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

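/* Audit architecture identifier: EM_SH, tagged little-endian when needed. */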
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}

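/*
 * Syscall entry hook: runs seccomp, the syscall tracehook, the syscall
 * tracepoint and audit, and returns the (possibly replaced) syscall number.
 */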
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

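/*
 * Syscall exit hook: reports the result to audit and the tracepoint, then
 * notifies the tracer, flagging whether a single-step stop is pending.
 */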
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}