/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2008 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

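/*
 * Single-step is implemented with the UBC (user break controller):
 * record the traced child's PC in thread.ubc_pc and bump the global
 * ubc_usercnt so the context switch code programs the UBC for it.
 */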
void user_enable_single_step(struct task_struct *child)
{
	/* Next scheduling will set up UBC */
	if (child->thread.ubc_pc == 0)
		ubc_usercnt += 1;

	child->thread.ubc_pc = get_stack_long(child,
				offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * Ensure the UBC is not programmed at the next context switch.
	 *
	 * Normally this is not needed but there are sequences such as
	 * singlestep, signal delivery, and continue that leave the
	 * ubc_pc non-zero leading to spurious SIGTRAPs.
	 */
	if (child->thread.ubc_pc != 0) {
		ubc_usercnt -= 1;
		child->thread.ubc_pc = 0;
	}
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

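/*
 * Regset support: the general register dump follows the struct pt_regs
 * layout -- R0..R15 first, then PC, PR, SR, GBR, MACH, MACL and TRA,
 * with anything past sizeof(struct pt_regs) zero-filled on reads.
 */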
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

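/* The write side mirrors genregs_get(); trailing data is ignored. */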
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},
};

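/*
 * Native regset view; the PTRACE_GETREGS/SETREGS cases below use it
 * via copy_regset_to_user()/copy_regset_from_user().
 */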
static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

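/*
 * The NULL 'dummy' struct user pointer in arch_ptrace() exists purely
 * for offset arithmetic: comparing addr against &dummy->fpu and
 * &dummy->u_fpvalid tells us which region of the USER area a
 * PEEKUSR/POKEUSR request refers to.
 */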
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	struct user * dummy = NULL;
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			if (!tsk_used_math(child)) {
				if (addr == (long)&dummy->fpu.fpscr)
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)&child->thread.fpu)
					[(addr - (long)&dummy->fpu) >> 2];
		} else if (addr == (long) &dummy->u_fpvalid)
			tmp = !!tsk_used_math(child);
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			set_stopped_child_used_math(child);
			((long *)&child->thread.fpu)
				[(addr - (long)&dummy->fpu) >> 2] = data;
			ret = 0;
		} else if (addr == (long) &dummy->u_fpvalid) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   (void __user *)data);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     (const void __user *)data);
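	/*
	 * Both DSP cases locate the saved pt_dspregs block at the top of
	 * the child's kernel stack and only copy it to or from userspace
	 * when the word just below that block matches SR_FD.
	 */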
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS: {
		unsigned long dp;

		ret = -EIO;
		dp = ((unsigned long) child) + THREAD_SIZE -
			 sizeof(struct pt_dspregs);
		if (*((int *) (dp - 4)) == SR_FD) {
			copy_to_user((void *)addr, (void *) dp,
				sizeof(struct pt_dspregs));
			ret = 0;
		}
		break;
	}

	case PTRACE_SETDSPREGS: {
		unsigned long dp;

		ret = -EIO;
		dp = ((unsigned long) child) + THREAD_SIZE -
			 sizeof(struct pt_dspregs);
		if (*((int *) (dp - 4)) == SR_FD) {
			copy_from_user((void *) dp, (void *)addr,
				sizeof(struct pt_dspregs));
			ret = 0;
		}
		break;
	}
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		unsigned long tmp = 0;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = child->mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = child->mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}

		ret = 0;
		if (put_user(tmp, datap)) {
			ret = -EFAULT;
			break;
		}
		break;
	}
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

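/*
 * Report the machine type to the audit subsystem: EM_SH, with the
 * little-endian flag OR'd in on little-endian kernels.
 */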
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}

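/*
 * Called from the syscall entry path: run seccomp, give a ptrace
 * tracer a chance to see (or cancel) the call, and log it to audit.
 * Returns the syscall number to dispatch, or -1 to force ENOSYS when
 * the tracer aborted the call.
 */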
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

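/*
 * Called on syscall exit: report the result to audit and notify the
 * tracer, telling tracehook whether this was a single-step so it can
 * raise the step trap.
 */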
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}