Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
Paul Mundt | 934135c | 2008-09-12 19:52:36 +0900 | [diff] [blame] | 2 | * SuperH process tracing |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | * |
Paul Mundt | 934135c | 2008-09-12 19:52:36 +0900 | [diff] [blame] | 4 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka |
| 5 | * Copyright (C) 2002 - 2008 Paul Mundt |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | * |
Paul Mundt | 934135c | 2008-09-12 19:52:36 +0900 | [diff] [blame] | 7 | * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> |
| 8 | * |
| 9 | * This file is subject to the terms and conditions of the GNU General Public |
| 10 | * License. See the file "COPYING" in the main directory of this archive |
| 11 | * for more details. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/sched.h> |
| 15 | #include <linux/mm.h> |
| 16 | #include <linux/smp.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <linux/errno.h> |
| 18 | #include <linux/ptrace.h> |
| 19 | #include <linux/user.h> |
| 20 | #include <linux/slab.h> |
| 21 | #include <linux/security.h> |
Jesper Juhl | 7ed20e1 | 2005-05-01 08:59:14 -0700 | [diff] [blame] | 22 | #include <linux/signal.h> |
Stuart Menefy | 9432f96 | 2007-02-23 13:22:17 +0900 | [diff] [blame] | 23 | #include <linux/io.h> |
Yuichi Nakamura | 1322b9d | 2007-11-10 19:21:34 +0900 | [diff] [blame] | 24 | #include <linux/audit.h> |
Paul Mundt | c4637d4 | 2008-07-30 15:30:52 +0900 | [diff] [blame] | 25 | #include <linux/seccomp.h> |
Paul Mundt | ab99c73 | 2008-07-30 19:55:30 +0900 | [diff] [blame] | 26 | #include <linux/tracehook.h> |
Paul Mundt | 934135c | 2008-09-12 19:52:36 +0900 | [diff] [blame] | 27 | #include <linux/elf.h> |
| 28 | #include <linux/regset.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | #include <asm/uaccess.h> |
| 30 | #include <asm/pgtable.h> |
| 31 | #include <asm/system.h> |
| 32 | #include <asm/processor.h> |
| 33 | #include <asm/mmu_context.h> |
Paul Mundt | fa43972 | 2008-09-04 18:53:58 +0900 | [diff] [blame] | 34 | #include <asm/syscalls.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 35 | |
/*
 * Fetch one word from the saved register frame on the child's
 * kernel stack. @offset is a byte offset into struct pt_regs.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *base = (unsigned char *)task_pt_regs(task);

	return *(int *)(base + offset);
}
| 47 | |
/*
 * Store one word into the saved register frame on the child's
 * kernel stack. @offset is a byte offset into struct pt_regs.
 * Always returns 0.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *base = (unsigned char *)task_pt_regs(task);

	*(unsigned long *)(base + offset) = data;
	return 0;
}
| 61 | |
Paul Mundt | c459dbf | 2008-07-30 19:09:31 +0900 | [diff] [blame] | 62 | void user_enable_single_step(struct task_struct *child) |
| 63 | { |
Paul Mundt | c459dbf | 2008-07-30 19:09:31 +0900 | [diff] [blame] | 64 | /* Next scheduling will set up UBC */ |
| 65 | if (child->thread.ubc_pc == 0) |
| 66 | ubc_usercnt += 1; |
| 67 | |
Paul Mundt | 934135c | 2008-09-12 19:52:36 +0900 | [diff] [blame] | 68 | child->thread.ubc_pc = get_stack_long(child, |
| 69 | offsetof(struct pt_regs, pc)); |
Paul Mundt | c459dbf | 2008-07-30 19:09:31 +0900 | [diff] [blame] | 70 | |
| 71 | set_tsk_thread_flag(child, TIF_SINGLESTEP); |
| 72 | } |
| 73 | |
| 74 | void user_disable_single_step(struct task_struct *child) |
Stuart Menefy | 9432f96 | 2007-02-23 13:22:17 +0900 | [diff] [blame] | 75 | { |
| 76 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); |
| 77 | |
| 78 | /* |
| 79 | * Ensure the UBC is not programmed at the next context switch. |
| 80 | * |
| 81 | * Normally this is not needed but there are sequences such as |
| 82 | * singlestep, signal delivery, and continue that leave the |
| 83 | * ubc_pc non-zero leading to spurious SIGTRAPs. |
| 84 | */ |
| 85 | if (child->thread.ubc_pc != 0) { |
| 86 | ubc_usercnt -= 1; |
| 87 | child->thread.ubc_pc = 0; |
| 88 | } |
| 89 | } |
| 90 | |
/*
 * Called by kernel/ptrace.c when detaching from a traced child.
 *
 * Make sure single-step state (TIF_SINGLESTEP, pending UBC watch)
 * is torn down so the child does not keep trapping after detach.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}
| 100 | |
Paul Mundt | 934135c | 2008-09-12 19:52:36 +0900 | [diff] [blame] | 101 | static int genregs_get(struct task_struct *target, |
| 102 | const struct user_regset *regset, |
| 103 | unsigned int pos, unsigned int count, |
| 104 | void *kbuf, void __user *ubuf) |
| 105 | { |
| 106 | const struct pt_regs *regs = task_pt_regs(target); |
| 107 | int ret; |
| 108 | |
| 109 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
| 110 | regs->regs, |
| 111 | 0, 16 * sizeof(unsigned long)); |
| 112 | if (!ret) |
| 113 | /* PC, PR, SR, GBR, MACH, MACL, TRA */ |
| 114 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
| 115 | ®s->pc, |
| 116 | offsetof(struct pt_regs, pc), |
| 117 | sizeof(struct pt_regs)); |
| 118 | if (!ret) |
| 119 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, |
| 120 | sizeof(struct pt_regs), -1); |
| 121 | |
| 122 | return ret; |
| 123 | } |
| 124 | |
| 125 | static int genregs_set(struct task_struct *target, |
| 126 | const struct user_regset *regset, |
| 127 | unsigned int pos, unsigned int count, |
| 128 | const void *kbuf, const void __user *ubuf) |
| 129 | { |
| 130 | struct pt_regs *regs = task_pt_regs(target); |
| 131 | int ret; |
| 132 | |
| 133 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
| 134 | regs->regs, |
| 135 | 0, 16 * sizeof(unsigned long)); |
| 136 | if (!ret && count > 0) |
| 137 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
| 138 | ®s->pc, |
| 139 | offsetof(struct pt_regs, pc), |
| 140 | sizeof(struct pt_regs)); |
| 141 | if (!ret) |
| 142 | ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, |
| 143 | sizeof(struct pt_regs), -1); |
| 144 | |
| 145 | return ret; |
| 146 | } |
| 147 | |
Paul Mundt | 5dadb34 | 2008-09-12 22:42:10 +0900 | [diff] [blame] | 148 | #ifdef CONFIG_SH_DSP |
| 149 | static int dspregs_get(struct task_struct *target, |
| 150 | const struct user_regset *regset, |
| 151 | unsigned int pos, unsigned int count, |
| 152 | void *kbuf, void __user *ubuf) |
| 153 | { |
| 154 | const struct pt_dspregs *regs = task_pt_dspregs(target); |
| 155 | int ret; |
| 156 | |
| 157 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, |
| 158 | 0, sizeof(struct pt_dspregs)); |
| 159 | if (!ret) |
| 160 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, |
| 161 | sizeof(struct pt_dspregs), -1); |
| 162 | |
| 163 | return ret; |
| 164 | } |
| 165 | |
| 166 | static int dspregs_set(struct task_struct *target, |
| 167 | const struct user_regset *regset, |
| 168 | unsigned int pos, unsigned int count, |
| 169 | const void *kbuf, const void __user *ubuf) |
| 170 | { |
| 171 | struct pt_dspregs *regs = task_pt_dspregs(target); |
| 172 | int ret; |
| 173 | |
| 174 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, |
| 175 | 0, sizeof(struct pt_dspregs)); |
| 176 | if (!ret) |
| 177 | ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, |
| 178 | sizeof(struct pt_dspregs), -1); |
| 179 | |
| 180 | return ret; |
| 181 | } |
Paul Mundt | 7246199 | 2008-09-12 22:56:35 +0900 | [diff] [blame^] | 182 | |
| 183 | static int dspregs_active(struct task_struct *target, |
| 184 | const struct user_regset *regset) |
| 185 | { |
| 186 | struct pt_regs *regs = task_pt_regs(target); |
| 187 | |
| 188 | return regs->sr & SR_DSP ? regset->n : 0; |
| 189 | } |
Paul Mundt | 5dadb34 | 2008-09-12 22:42:10 +0900 | [diff] [blame] | 190 | #endif |
| 191 | |
/*
 * These are our native regset flavours.
 *
 * Indices into sh_regsets[] below; REGSET_DSP only exists when the
 * kernel is built with DSP support.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};
| 201 | |
static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_DSP
	/*
	 * DSP register block; reached through the PTRACE_GETDSPREGS /
	 * PTRACE_SETDSPREGS requests handled in arch_ptrace().
	 */
	[REGSET_DSP] = {
		.n = sizeof(struct pt_dspregs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = dspregs_get,
		.set = dspregs_set,
		.active = dspregs_active,
	},
#endif
};
| 228 | |
/* The native regset view for 32-bit SH. */
static const struct user_regset_view user_sh_native_view = {
	.name = "sh",
	.e_machine = EM_SH,
	.regsets = sh_regsets,
	.n = ARRAY_SIZE(sh_regsets),
};
| 235 | |
/*
 * Return the regset view for @task.  There is only the one native
 * view here, regardless of the task.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
| 240 | |
/*
 * Architecture-specific ptrace requests for SH; anything not handled
 * here falls through to the generic ptrace_request().
 *
 * @addr for PEEKUSR/POKEUSR is a byte offset into struct user, laid
 * out as the pt_regs frame followed by the FPU state and u_fpvalid.
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	/* Never dereferenced: only used to compute USER-area offsets. */
	struct user * dummy = NULL;
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* Reject unaligned or out-of-range USER-area offsets. */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			if (!tsk_used_math(child)) {
				/*
				 * Child never touched the FPU: fake the
				 * reset state rather than reading stale
				 * thread.fpu contents.
				 */
				if (addr == (long)&dummy->fpu.fpscr)
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)&child->thread.fpu)
					[(addr - (long)&dummy->fpu) >> 2];
		} else if (addr == (long) &dummy->u_fpvalid)
			tmp = !!tsk_used_math(child);
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		/* Same offset validation as PEEKUSR above. */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			/* Writing FPU state implies the child uses math. */
			set_stopped_child_used_math(child);
			((long *)&child->thread.fpu)
				[(addr - (long)&dummy->fpu) >> 2] = data;
			ret = 0;
		} else if (addr == (long) &dummy->u_fpvalid) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		/* Any other offset: ret stays -EIO. */
		break;

	/* Whole-frame transfers go through the regset machinery. */
	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   (void __user *)data);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     (const void __user *)data);
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   (void __user *)data);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     (const void __user *)data);
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		/* Hand the FDPIC loadmap addresses to the tracer. */
		unsigned long tmp = 0;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = child->mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = child->mm->context.interp_fdpic_loadmap;
			break;
		default:
			/* Unknown sub-request: report 0 rather than fail. */
			break;
		}

		ret = 0;
		if (put_user(tmp, datap)) {
			ret = -EFAULT;
			break;
		}
		break;
	}
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
| 349 | |
Paul Mundt | 9e5e211 | 2008-07-30 20:05:35 +0900 | [diff] [blame] | 350 | static inline int audit_arch(void) |
| 351 | { |
| 352 | int arch = EM_SH; |
| 353 | |
| 354 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
| 355 | arch |= __AUDIT_ARCH_LE; |
| 356 | #endif |
| 357 | |
| 358 | return arch; |
| 359 | } |
| 360 | |
/*
 * Syscall-entry hook: runs seccomp, then the tracer, then audit.
 * Returns the syscall number to dispatch, or -1 to force ENOSYS
 * when the tracer vetoed the call.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Seccomp gets first look at the syscall number in regs[0]. */
	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	/* GNU ?: — the vetoed (-1) value, else the real syscall number. */
	return ret ?: regs->regs[0];
}
| 383 | |
| 384 | asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) |
| 385 | { |
| 386 | int step; |
| 387 | |
| 388 | if (unlikely(current->audit_context)) |
| 389 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]), |
| 390 | regs->regs[0]); |
| 391 | |
| 392 | step = test_thread_flag(TIF_SINGLESTEP); |
| 393 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
| 394 | tracehook_report_syscall_exit(regs, step); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 395 | } |