/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Get a word from the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * Put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

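/*
 * perf overflow handler for the ptrace hardware breakpoint; invoked by
 * the hw_breakpoint layer when the planted breakpoint fires.
 */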
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

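/*
 * Arm a hardware breakpoint at @addr for @tsk, registering the perf
 * event on first use and re-enabling the existing one at the new
 * address thereafter.
 */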
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		/* re-enable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

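/*
 * Called by the generic ptrace code for PTRACE_SINGLESTEP: flag the
 * child as single-stepping and plant a breakpoint at its current PC.
 */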
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	if (ptrace_get_breakpoints(child) < 0)
		return;

	set_single_step(child, pc);
	ptrace_put_breakpoints(child);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

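/*
 * Copy out R0-R15 and PC/PR/SR/GBR/MACH/MACL/TRA from the child's
 * kernel-stack pt_regs; anything requested beyond struct pt_regs is
 * filled with zeroes.
 */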
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

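/*
 * Update the child's pt_regs from the supplied buffer; writes beyond
 * the end of struct pt_regs are silently ignored.
 */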
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

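/*
 * FPU regset: transfers the hardware FPU context when the CPU has an
 * FPU, or the software-emulated context otherwise. init_fpu() ensures
 * the xstate area exists before it is touched.
 */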
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

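/*
 * DSP regset: mirrors thread.dsp_status.dsp_regs, and is only reported
 * as active while the task is running with SR.DSP set.
 */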
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

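/*
 * Name/offset table for the registers in struct pt_regs, consumed by
 * the generic register accessors (regs_query_register_offset() and
 * friends).
 */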
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

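/*
 * All 32-bit SH tasks use the same native regset view.
 */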
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

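/*
 * Handle the SH-specific ptrace requests: peeking and poking the USER
 * area (pt_regs, FPU state and the text/data layout words) plus the
 * bulk regset transfers. Anything else is passed on to the generic
 * ptrace_request().
 */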
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

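/*
 * Audit architecture identifier: EM_SH, with the little-endian flag
 * folded in when the kernel is built for little-endian.
 */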
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}

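/*
 * Syscall entry hook: run seccomp, give a ptrace tracer the chance to
 * inspect or abort the call, and emit the tracepoint and audit entry
 * records. Returns the syscall number to dispatch, or -1 if tracing
 * vetoed the call.
 */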
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(audit_arch(), regs->regs[3],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

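/*
 * Syscall exit hook: emit the audit and tracepoint exit records, then
 * report to the tracer, flagging whether this stop is a single-step.
 */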
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}