Stephen Rothwell81e70092005-10-18 11:17:58 +10001/*
2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
Stephen Rothwell81e70092005-10-18 11:17:58 +100020#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100023#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
Lucas Woods05ead012007-12-13 15:56:06 -080027#include <linux/ptrace.h>
Christian Dietrich76462232011-06-04 05:36:54 +000028#include <linux/ratelimit.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100029#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
31#include <linux/compat.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100032#else
33#include <linux/wait.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100034#include <linux/unistd.h>
35#include <linux/stddef.h>
36#include <linux/tty.h>
37#include <linux/binfmts.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100038#endif
39
40#include <asm/uaccess.h>
41#include <asm/cacheflush.h>
Arnd Bergmanna7f31842006-03-23 00:00:08 +010042#include <asm/syscalls.h>
David Gibsonc5ff7002005-11-09 11:21:07 +110043#include <asm/sigcontext.h>
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +110044#include <asm/vdso.h>
David Howellsae3a1972012-03-28 18:30:02 +010045#include <asm/switch_to.h>
Michael Neuling2b0a5762013-02-13 16:21:41 +000046#include <asm/tm.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100047#ifdef CONFIG_PPC64
Stephen Rothwell879168e2005-11-03 15:32:07 +110048#include "ppc32.h"
Stephen Rothwell81e70092005-10-18 11:17:58 +100049#include <asm/unistd.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100050#else
51#include <asm/ucontext.h>
52#include <asm/pgtable.h>
53#endif
54
Benjamin Herrenschmidt22e38f22007-06-04 15:15:49 +100055#include "signal.h"
56
Stephen Rothwell81e70092005-10-18 11:17:58 +100057#undef DEBUG_SIG
58
Stephen Rothwell81e70092005-10-18 11:17:58 +100059#ifdef CONFIG_PPC64
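/*
 * Note: on a 64-bit kernel this file is built as the compat (32-bit) signal
 * path, so the sys_* functions defined here become the compat_sys_* entry
 * points and the sigcontext/mcontext/ucontext types refer to their 32-bit
 * layouts.
 */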
Stephen Rothwellb09a4912005-10-18 14:51:57 +100060#define sys_sigsuspend compat_sys_sigsuspend
61#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
62#define sys_rt_sigreturn compat_sys_rt_sigreturn
63#define sys_sigaction compat_sys_sigaction
64#define sys_swapcontext compat_sys_swapcontext
65#define sys_sigreturn compat_sys_sigreturn
Stephen Rothwell81e70092005-10-18 11:17:58 +100066
67#define old_sigaction old_sigaction32
68#define sigcontext sigcontext32
69#define mcontext mcontext32
70#define ucontext ucontext32
71
72/*
Michael Neulingc1cb2992008-07-08 18:43:41 +100073 * Userspace code may pass a ucontext which doesn't include the VSX data added
74 * at the end. We need to check for this case.
75 */
76#define UCONTEXTSIZEWITHOUTVSX \
77 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
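/*
 * sys_swapcontext() below compares the user-supplied ctx_size against this
 * value to decide whether the passed-in context has room for the VSX
 * register halves.
 */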
78
79/*
Stephen Rothwell81e70092005-10-18 11:17:58 +100080 * Returning 0 means we return to userspace via
81 * ret_from_except and thus restore all user
82 * registers from *regs. This is what we need
83 * to do when a signal has been delivered.
84 */
Stephen Rothwell81e70092005-10-18 11:17:58 +100085
86#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
87#undef __SIGNAL_FRAMESIZE
88#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
89#undef ELF_NVRREG
90#define ELF_NVRREG ELF_NVRREG32
91
92/*
93 * Functions for flipping sigsets (thanks to brain dead generic
94 * implementation that makes things simple for little endian only)
95 */
96static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
97{
98 compat_sigset_t cset;
99
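/*
 * The cases below fall through deliberately: starting at the highest word
 * present, each 64-bit word of the kernel sigset_t is split into two 32-bit
 * words of the compat sigset_t.
 */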
100 switch (_NSIG_WORDS) {
Will Deacona313f4c2011-11-08 04:51:19 +0000101 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000102 cset.sig[7] = set->sig[3] >> 32;
103 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
104 cset.sig[5] = set->sig[2] >> 32;
105 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
106 cset.sig[3] = set->sig[1] >> 32;
107 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
108 cset.sig[1] = set->sig[0] >> 32;
109 }
110 return copy_to_user(uset, &cset, sizeof(*uset));
111}
112
Paul Mackerras9b7cf8b2005-10-19 23:13:04 +1000113static inline int get_sigset_t(sigset_t *set,
114 const compat_sigset_t __user *uset)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000115{
116 compat_sigset_t s32;
117
118 if (copy_from_user(&s32, uset, sizeof(*uset)))
119 return -EFAULT;
120
121 /*
122 * Swap the 2 words of the 64-bit sigset_t (they are stored
123 * in the "wrong" endian in 32-bit user storage).
124 */
125 switch (_NSIG_WORDS) {
126 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
127 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
128 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
129 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
130 }
131 return 0;
132}
133
134static inline int get_old_sigaction(struct k_sigaction *new_ka,
135 struct old_sigaction __user *act)
136{
137 compat_old_sigset_t mask;
138 compat_uptr_t handler, restorer;
139
140 if (get_user(handler, &act->sa_handler) ||
141 __get_user(restorer, &act->sa_restorer) ||
142 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
143 __get_user(mask, &act->sa_mask))
144 return -EFAULT;
145 new_ka->sa.sa_handler = compat_ptr(handler);
146 new_ka->sa.sa_restorer = compat_ptr(restorer);
147 siginitset(&new_ka->sa.sa_mask, mask);
148 return 0;
149}
150
Al Viro29e646d2006-02-01 05:28:09 -0500151#define to_user_ptr(p) ptr_to_compat(p)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000152#define from_user_ptr(p) compat_ptr(p)
153
154static inline int save_general_regs(struct pt_regs *regs,
155 struct mcontext __user *frame)
156{
157 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
158 int i;
159
Paul Mackerras1bd79332006-03-08 13:24:22 +1100160 WARN_ON(!FULL_REGS(regs));
David Woodhouse401d1f02005-11-15 18:52:18 +0000161
162 for (i = 0; i <= PT_RESULT; i ++) {
163 if (i == 14 && !FULL_REGS(regs))
164 i = 32;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000165 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
166 return -EFAULT;
David Woodhouse401d1f02005-11-15 18:52:18 +0000167 }
Stephen Rothwell81e70092005-10-18 11:17:58 +1000168 return 0;
169}
170
171static inline int restore_general_regs(struct pt_regs *regs,
172 struct mcontext __user *sr)
173{
174 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
175 int i;
176
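/*
 * PT_MSR and PT_SOFTE are skipped: the MSR is never taken directly from
 * userspace (the callers merge back only the LE bit), and SOFTE is the
 * kernel's soft interrupt-enable state.
 */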
177 for (i = 0; i <= PT_RESULT; i++) {
178 if ((i == PT_MSR) || (i == PT_SOFTE))
179 continue;
180 if (__get_user(gregs[i], &sr->mc_gregs[i]))
181 return -EFAULT;
182 }
183 return 0;
184}
185
186#else /* CONFIG_PPC64 */
187
Stephen Rothwell81e70092005-10-18 11:17:58 +1000188#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
189
190static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
191{
192 return copy_to_user(uset, set, sizeof(*uset));
193}
194
Paul Mackerras9b7cf8b2005-10-19 23:13:04 +1000195static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000196{
197 return copy_from_user(set, uset, sizeof(*uset));
198}
199
200static inline int get_old_sigaction(struct k_sigaction *new_ka,
201 struct old_sigaction __user *act)
202{
203 old_sigset_t mask;
204
205 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
206 __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
Al Viro43f16812012-04-22 17:01:49 -0400207 __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
208 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
209 __get_user(mask, &act->sa_mask))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000210 return -EFAULT;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000211 siginitset(&new_ka->sa.sa_mask, mask);
212 return 0;
213}
214
Al Viro29e646d2006-02-01 05:28:09 -0500215#define to_user_ptr(p) ((unsigned long)(p))
216#define from_user_ptr(p) ((void __user *)(p))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000217
218static inline int save_general_regs(struct pt_regs *regs,
219 struct mcontext __user *frame)
220{
Paul Mackerras1bd79332006-03-08 13:24:22 +1100221 WARN_ON(!FULL_REGS(regs));
Stephen Rothwell81e70092005-10-18 11:17:58 +1000222 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
223}
224
225static inline int restore_general_regs(struct pt_regs *regs,
226 struct mcontext __user *sr)
227{
228 /* copy up to but not including MSR */
229 if (__copy_from_user(regs, &sr->mc_gregs,
230 PT_MSR * sizeof(elf_greg_t)))
231 return -EFAULT;
232 /* copy from orig_r3 (the word after the MSR) up to the end */
233 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
234 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
235 return -EFAULT;
236 return 0;
237}
238
239#endif /* CONFIG_PPC64 */
240
Stephen Rothwell81e70092005-10-18 11:17:58 +1000241/*
242 * Atomically swap in the new signal mask, and wait for a signal.
243 */
David Woodhouse150256d2006-01-18 17:43:57 -0800244long sys_sigsuspend(old_sigset_t mask)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000245{
Matt Fleminga2007ce2012-02-14 01:40:59 +0000246 sigset_t blocked;
Matt Fleminga2007ce2012-02-14 01:40:59 +0000247 siginitset(&blocked, mask);
Al Viro68f3f162012-05-21 21:42:32 -0400248 return sigsuspend(&blocked);
Stephen Rothwell81e70092005-10-18 11:17:58 +1000249}
250
Stephen Rothwell81e70092005-10-18 11:17:58 +1000251long sys_sigaction(int sig, struct old_sigaction __user *act,
252 struct old_sigaction __user *oact)
253{
254 struct k_sigaction new_ka, old_ka;
255 int ret;
256
257#ifdef CONFIG_PPC64
258 if (sig < 0)
259 sig = -sig;
260#endif
261
262 if (act) {
263 if (get_old_sigaction(&new_ka, act))
264 return -EFAULT;
265 }
266
267 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
268 if (!ret && oact) {
269 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
270 __put_user(to_user_ptr(old_ka.sa.sa_handler),
271 &oact->sa_handler) ||
272 __put_user(to_user_ptr(old_ka.sa.sa_restorer),
273 &oact->sa_restorer) ||
274 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
275 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
276 return -EFAULT;
277 }
278
279 return ret;
280}
281
282/*
283 * When we have signals to deliver, we set up on the
284 * user stack, going down from the original stack pointer:
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000285 * an ABI gap of 56 words
286 * an mcontext struct
Stephen Rothwell81e70092005-10-18 11:17:58 +1000287 * a sigcontext struct
288 * a gap of __SIGNAL_FRAMESIZE bytes
289 *
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000290 * Each of these things must be a multiple of 16 bytes in size. The following
 291 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
Stephen Rothwell81e70092005-10-18 11:17:58 +1000292 *
293 */
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000294struct sigframe {
295 struct sigcontext sctx; /* the sigcontext */
Stephen Rothwell81e70092005-10-18 11:17:58 +1000296 struct mcontext mctx; /* all the register values */
Michael Neuling2b0a5762013-02-13 16:21:41 +0000297#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
298 struct sigcontext sctx_transact;
299 struct mcontext mctx_transact;
300#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +1000301 /*
302 * Programs using the rs6000/xcoff abi can save up to 19 gp
303 * regs and 18 fp regs below sp before decrementing it.
304 */
305 int abigap[56];
306};
307
308/* We use the mc_pad field for the signal return trampoline. */
309#define tramp mc_pad
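/*
 * When the VDSO trampoline is not available, the otherwise unused mc_pad
 * words of the user's mcontext hold the two-instruction sigreturn
 * trampoline (li r0,NR; sc) written by save_user_regs()/save_tm_user_regs().
 */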
310
311/*
312 * When we have rt signals to deliver, we set up on the
313 * user stack, going down from the original stack pointer:
314 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
315 * a gap of __SIGNAL_FRAMESIZE+16 bytes
316 * (the +16 is to get the siginfo and ucontext in the same
317 * positions as in older kernels).
318 *
319 * Each of these things must be a multiple of 16 bytes in size.
320 *
321 */
322struct rt_sigframe {
323#ifdef CONFIG_PPC64
324 compat_siginfo_t info;
325#else
326 struct siginfo info;
327#endif
328 struct ucontext uc;
Michael Neuling2b0a5762013-02-13 16:21:41 +0000329#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
330 struct ucontext uc_transact;
331#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +1000332 /*
333 * Programs using the rs6000/xcoff abi can save up to 19 gp
334 * regs and 18 fp regs below sp before decrementing it.
335 */
336 int abigap[56];
337};
338
Michael Neuling6a274c02008-07-02 14:06:37 +1000339#ifdef CONFIG_VSX
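/*
 * With VSX configured, the FP state in the thread_struct is not stored as
 * one contiguous array, so the helpers below marshal the values through a
 * packed local buffer (one double per register, plus the fpscr) instead of
 * copying the thread_struct area directly as the !VSX versions further
 * down do.
 */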
340unsigned long copy_fpr_to_user(void __user *to,
341 struct task_struct *task)
342{
343 double buf[ELF_NFPREG];
344 int i;
345
 346 /* copy FPRs from the thread_struct to a local buffer, then write that to userspace */
347 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
348 buf[i] = task->thread.TS_FPR(i);
349 memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
350 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
351}
352
353unsigned long copy_fpr_from_user(struct task_struct *task,
354 void __user *from)
355{
356 double buf[ELF_NFPREG];
357 int i;
358
359 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
360 return 1;
361 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
362 task->thread.TS_FPR(i) = buf[i];
363 memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
364
365 return 0;
366}
367
368unsigned long copy_vsx_to_user(void __user *to,
369 struct task_struct *task)
370{
371 double buf[ELF_NVSRHALFREG];
372 int i;
373
 374 /* copy the VSR 0-31 upper halves from the thread_struct to a local buffer, then write that to userspace */
375 for (i = 0; i < ELF_NVSRHALFREG; i++)
376 buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
377 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
378}
379
380unsigned long copy_vsx_from_user(struct task_struct *task,
381 void __user *from)
382{
383 double buf[ELF_NVSRHALFREG];
384 int i;
385
386 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
387 return 1;
388 for (i = 0; i < ELF_NVSRHALFREG ; i++)
389 task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
390 return 0;
391}
Michael Neuling2b0a5762013-02-13 16:21:41 +0000392
393#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
394unsigned long copy_transact_fpr_to_user(void __user *to,
395 struct task_struct *task)
396{
397 double buf[ELF_NFPREG];
398 int i;
399
 400 /* copy the transactional FPRs from the thread_struct to a local buffer, then write that to userspace */
401 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
402 buf[i] = task->thread.TS_TRANS_FPR(i);
403 memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
404 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
405}
406
407unsigned long copy_transact_fpr_from_user(struct task_struct *task,
408 void __user *from)
409{
410 double buf[ELF_NFPREG];
411 int i;
412
413 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
414 return 1;
415 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
416 task->thread.TS_TRANS_FPR(i) = buf[i];
417 memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
418
419 return 0;
420}
421
422unsigned long copy_transact_vsx_to_user(void __user *to,
423 struct task_struct *task)
424{
425 double buf[ELF_NVSRHALFREG];
426 int i;
427
 428 /* copy the transactional VSR 0-31 upper halves from the thread_struct to a local buffer, then write that to userspace */
429 for (i = 0; i < ELF_NVSRHALFREG; i++)
430 buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
431 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
432}
433
434unsigned long copy_transact_vsx_from_user(struct task_struct *task,
435 void __user *from)
436{
437 double buf[ELF_NVSRHALFREG];
438 int i;
439
440 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
441 return 1;
442 for (i = 0; i < ELF_NVSRHALFREG ; i++)
443 task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
444 return 0;
445}
446#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Michael Neuling6a274c02008-07-02 14:06:37 +1000447#else
448inline unsigned long copy_fpr_to_user(void __user *to,
449 struct task_struct *task)
450{
451 return __copy_to_user(to, task->thread.fpr,
452 ELF_NFPREG * sizeof(double));
453}
454
455inline unsigned long copy_fpr_from_user(struct task_struct *task,
456 void __user *from)
457{
458 return __copy_from_user(task->thread.fpr, from,
459 ELF_NFPREG * sizeof(double));
460}
Michael Neuling2b0a5762013-02-13 16:21:41 +0000461
462#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
463inline unsigned long copy_transact_fpr_to_user(void __user *to,
464 struct task_struct *task)
465{
466 return __copy_to_user(to, task->thread.transact_fpr,
467 ELF_NFPREG * sizeof(double));
468}
469
470inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
471 void __user *from)
472{
473 return __copy_from_user(task->thread.transact_fpr, from,
474 ELF_NFPREG * sizeof(double));
475}
476#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Michael Neuling6a274c02008-07-02 14:06:37 +1000477#endif
478
Stephen Rothwell81e70092005-10-18 11:17:58 +1000479/*
480 * Save the current user registers on the user stack.
481 * We only save the altivec/spe registers if the process has used
482 * altivec/spe instructions at some point.
483 */
484static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
Michael Neuling16c29d12008-10-23 00:42:36 +0000485 int sigret, int ctx_has_vsx_region)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000486{
Michael Neuling9e751182008-06-25 14:07:17 +1000487 unsigned long msr = regs->msr;
488
Stephen Rothwell81e70092005-10-18 11:17:58 +1000489 /* Make sure floating point registers are stored in regs */
490 flush_fp_to_thread(current);
491
Michael Neulingc6e67712008-06-25 14:07:18 +1000492 /* save general registers */
493 if (save_general_regs(regs, frame))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000494 return 1;
495
Stephen Rothwell81e70092005-10-18 11:17:58 +1000496#ifdef CONFIG_ALTIVEC
497 /* save altivec registers */
498 if (current->thread.used_vr) {
499 flush_altivec_to_thread(current);
500 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
501 ELF_NVRREG * sizeof(vector128)))
502 return 1;
503 /* set MSR_VEC in the saved MSR value to indicate that
504 frame->mc_vregs contains valid data */
Michael Neuling9e751182008-06-25 14:07:17 +1000505 msr |= MSR_VEC;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000506 }
507 /* else assert((regs->msr & MSR_VEC) == 0) */
508
509 /* We always copy to/from vrsave, it's 0 if we don't have or don't
510 * use altivec. Since VSCR only contains 32 bits saved in the least
511 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
512 * most significant bits of that same vector. --BenH
513 */
514 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
515 return 1;
516#endif /* CONFIG_ALTIVEC */
Michael Neuling6a274c02008-07-02 14:06:37 +1000517 if (copy_fpr_to_user(&frame->mc_fregs, current))
Michael Neulingc6e67712008-06-25 14:07:18 +1000518 return 1;
Michael Neuling6a274c02008-07-02 14:06:37 +1000519#ifdef CONFIG_VSX
Michael Neulingce48b212008-06-25 14:07:18 +1000520 /*
521 * Copy VSR 0-31 upper half from thread_struct to local
522 * buffer, then write that to userspace. Also set MSR_VSX in
523 * the saved MSR value to indicate that frame->mc_vregs
524 * contains valid data
525 */
Michael Neuling16c29d12008-10-23 00:42:36 +0000526 if (current->thread.used_vsr && ctx_has_vsx_region) {
Michael Neuling7c292172008-07-11 16:29:12 +1000527 __giveup_vsx(current);
Michael Neuling6a274c02008-07-02 14:06:37 +1000528 if (copy_vsx_to_user(&frame->mc_vsregs, current))
Michael Neulingce48b212008-06-25 14:07:18 +1000529 return 1;
530 msr |= MSR_VSX;
531 }
Michael Neulingc6e67712008-06-25 14:07:18 +1000532#endif /* CONFIG_VSX */
Stephen Rothwell81e70092005-10-18 11:17:58 +1000533#ifdef CONFIG_SPE
534 /* save spe registers */
535 if (current->thread.used_spe) {
536 flush_spe_to_thread(current);
537 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
538 ELF_NEVRREG * sizeof(u32)))
539 return 1;
540 /* set MSR_SPE in the saved MSR value to indicate that
541 frame->mc_vregs contains valid data */
Michael Neuling9e751182008-06-25 14:07:17 +1000542 msr |= MSR_SPE;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000543 }
544 /* else assert((regs->msr & MSR_SPE) == 0) */
545
546 /* We always copy to/from spefscr */
547 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
548 return 1;
549#endif /* CONFIG_SPE */
550
Michael Neuling9e751182008-06-25 14:07:17 +1000551 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
552 return 1;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000553 if (sigret) {
554 /* Set up the sigreturn trampoline: li r0,sigret; sc */
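 /*
 * 0x38000000 is the "li r0,0" encoding (addi r0,0,imm), so adding sigret
 * fills in the syscall number; 0x44000002 is "sc". The icache flush below
 * is needed because the instructions were stored through the data cache.
 */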
555 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
556 || __put_user(0x44000002UL, &frame->tramp[1]))
557 return 1;
558 flush_icache_range((unsigned long) &frame->tramp[0],
559 (unsigned long) &frame->tramp[2]);
560 }
561
562 return 0;
563}
564
Michael Neuling2b0a5762013-02-13 16:21:41 +0000565#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
566/*
567 * Save the current user registers on the user stack.
568 * We only save the altivec/spe registers if the process has used
569 * altivec/spe instructions at some point.
570 * We also save the transactional registers to a second ucontext in the
571 * frame.
572 *
573 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
574 */
575static int save_tm_user_regs(struct pt_regs *regs,
576 struct mcontext __user *frame,
577 struct mcontext __user *tm_frame, int sigret)
578{
579 unsigned long msr = regs->msr;
580
581 /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
582 * thread.transact_fpr[], thread.transact_vr[], etc.
583 */
584 tm_enable();
585 tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
586
587 /* Make sure floating point registers are stored in regs */
588 flush_fp_to_thread(current);
589
590 /* Save both sets of general registers */
591 if (save_general_regs(&current->thread.ckpt_regs, frame)
592 || save_general_regs(regs, tm_frame))
593 return 1;
594
595 /* Stash the top half of the 64bit MSR into the 32bit MSR word
596 * of the transactional mcontext. This way we have a backward-compatible
597 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
598 * also look at what type of transaction (T or S) was active at the
599 * time of the signal.
600 */
601 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
602 return 1;
603
604#ifdef CONFIG_ALTIVEC
605 /* save altivec registers */
606 if (current->thread.used_vr) {
607 flush_altivec_to_thread(current);
608 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
609 ELF_NVRREG * sizeof(vector128)))
610 return 1;
611 if (msr & MSR_VEC) {
612 if (__copy_to_user(&tm_frame->mc_vregs,
613 current->thread.transact_vr,
614 ELF_NVRREG * sizeof(vector128)))
615 return 1;
616 } else {
617 if (__copy_to_user(&tm_frame->mc_vregs,
618 current->thread.vr,
619 ELF_NVRREG * sizeof(vector128)))
620 return 1;
621 }
622
623 /* set MSR_VEC in the saved MSR value to indicate that
624 * frame->mc_vregs contains valid data
625 */
626 msr |= MSR_VEC;
627 }
628
629 /* We always copy to/from vrsave, it's 0 if we don't have or don't
630 * use altivec. Since VSCR only contains 32 bits saved in the least
631 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
632 * most significant bits of that same vector. --BenH
633 */
634 if (__put_user(current->thread.vrsave,
635 (u32 __user *)&frame->mc_vregs[32]))
636 return 1;
637 if (msr & MSR_VEC) {
638 if (__put_user(current->thread.transact_vrsave,
639 (u32 __user *)&tm_frame->mc_vregs[32]))
640 return 1;
641 } else {
642 if (__put_user(current->thread.vrsave,
643 (u32 __user *)&tm_frame->mc_vregs[32]))
644 return 1;
645 }
646#endif /* CONFIG_ALTIVEC */
647
648 if (copy_fpr_to_user(&frame->mc_fregs, current))
649 return 1;
650 if (msr & MSR_FP) {
651 if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
652 return 1;
653 } else {
654 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
655 return 1;
656 }
657
658#ifdef CONFIG_VSX
659 /*
660 * Copy VSR 0-31 upper half from thread_struct to local
661 * buffer, then write that to userspace. Also set MSR_VSX in
662 * the saved MSR value to indicate that frame->mc_vregs
663 * contains valid data
664 */
665 if (current->thread.used_vsr) {
666 __giveup_vsx(current);
667 if (copy_vsx_to_user(&frame->mc_vsregs, current))
668 return 1;
669 if (msr & MSR_VSX) {
670 if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
671 current))
672 return 1;
673 } else {
674 if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
675 return 1;
676 }
677
678 msr |= MSR_VSX;
679 }
680#endif /* CONFIG_VSX */
681#ifdef CONFIG_SPE
682 /* SPE regs are not checkpointed with TM, so this section is
683 * simply the same as in save_user_regs().
684 */
685 if (current->thread.used_spe) {
686 flush_spe_to_thread(current);
687 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
688 ELF_NEVRREG * sizeof(u32)))
689 return 1;
690 /* set MSR_SPE in the saved MSR value to indicate that
691 * frame->mc_vregs contains valid data */
692 msr |= MSR_SPE;
693 }
694
695 /* We always copy to/from spefscr */
696 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
697 return 1;
698#endif /* CONFIG_SPE */
699
700 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
701 return 1;
702 if (sigret) {
703 /* Set up the sigreturn trampoline: li r0,sigret; sc */
704 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
705 || __put_user(0x44000002UL, &frame->tramp[1]))
706 return 1;
707 flush_icache_range((unsigned long) &frame->tramp[0],
708 (unsigned long) &frame->tramp[2]);
709 }
710
711 return 0;
712}
713#endif
714
Stephen Rothwell81e70092005-10-18 11:17:58 +1000715/*
716 * Restore the current user register values from the user stack,
717 * (except for MSR).
718 */
719static long restore_user_regs(struct pt_regs *regs,
720 struct mcontext __user *sr, int sig)
721{
722 long err;
723 unsigned int save_r2 = 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000724 unsigned long msr;
Michael Neulingc6e67712008-06-25 14:07:18 +1000725#ifdef CONFIG_VSX
Michael Neulingc6e67712008-06-25 14:07:18 +1000726 int i;
727#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +1000728
729 /*
730 * restore general registers but not including MSR or SOFTE. Also
731 * take care of keeping r2 (TLS) intact if not a signal
732 */
733 if (!sig)
734 save_r2 = (unsigned int)regs->gpr[2];
735 err = restore_general_regs(regs, sr);
Al Viro9a81c162010-09-20 21:48:57 +0100736 regs->trap = 0;
Paul Mackerrasfab5db92006-06-07 16:14:40 +1000737 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
Stephen Rothwell81e70092005-10-18 11:17:58 +1000738 if (!sig)
739 regs->gpr[2] = (unsigned long) save_r2;
740 if (err)
741 return 1;
742
Paul Mackerrasfab5db92006-06-07 16:14:40 +1000743 /* if doing signal return, restore the previous little-endian mode */
744 if (sig)
745 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
746
Paul Mackerras5388fb12006-01-11 22:11:39 +1100747 /*
748 * Do this before updating the thread state in
749 * current->thread.fpr/vr/evr. That way, if we get preempted
750 * and another task grabs the FPU/Altivec/SPE, it won't be
751 * tempted to save the current CPU state into the thread_struct
752 * and corrupt what we are writing there.
753 */
754 discard_lazy_cpu_state();
755
Stephen Rothwell81e70092005-10-18 11:17:58 +1000756#ifdef CONFIG_ALTIVEC
Michael Neulingc6e67712008-06-25 14:07:18 +1000757 /*
758 * Force the process to reload the altivec registers from
759 * current->thread when it next does altivec instructions
760 */
Stephen Rothwell81e70092005-10-18 11:17:58 +1000761 regs->msr &= ~MSR_VEC;
Paul Mackerrasfab5db92006-06-07 16:14:40 +1000762 if (msr & MSR_VEC) {
Stephen Rothwell81e70092005-10-18 11:17:58 +1000763 /* restore altivec registers from the stack */
764 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
765 sizeof(sr->mc_vregs)))
766 return 1;
767 } else if (current->thread.used_vr)
768 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
769
770 /* Always get VRSAVE back */
771 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
772 return 1;
773#endif /* CONFIG_ALTIVEC */
Michael Neuling6a274c02008-07-02 14:06:37 +1000774 if (copy_fpr_from_user(current, &sr->mc_fregs))
775 return 1;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000776
Michael Neulingc6e67712008-06-25 14:07:18 +1000777#ifdef CONFIG_VSX
Michael Neulingce48b212008-06-25 14:07:18 +1000778 /*
779 * Force the process to reload the VSX registers from
780 * current->thread when it next does VSX instruction.
781 */
782 regs->msr &= ~MSR_VSX;
783 if (msr & MSR_VSX) {
784 /*
785 * Restore altivec registers from the stack to a local
786 * buffer, then write this out to the thread_struct
787 */
Michael Neuling6a274c02008-07-02 14:06:37 +1000788 if (copy_vsx_from_user(current, &sr->mc_vsregs))
Michael Neulingce48b212008-06-25 14:07:18 +1000789 return 1;
Michael Neulingce48b212008-06-25 14:07:18 +1000790 } else if (current->thread.used_vsr)
791 for (i = 0; i < 32 ; i++)
792 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
Michael Neulingc6e67712008-06-25 14:07:18 +1000793#endif /* CONFIG_VSX */
794 /*
795 * force the process to reload the FP registers from
796 * current->thread when it next does FP instructions
797 */
798 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
799
Stephen Rothwell81e70092005-10-18 11:17:58 +1000800#ifdef CONFIG_SPE
801 /* force the process to reload the spe registers from
802 current->thread when it next does spe instructions */
803 regs->msr &= ~MSR_SPE;
Paul Mackerrasfab5db92006-06-07 16:14:40 +1000804 if (msr & MSR_SPE) {
Stephen Rothwell81e70092005-10-18 11:17:58 +1000805 /* restore spe registers from the stack */
806 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
807 ELF_NEVRREG * sizeof(u32)))
808 return 1;
809 } else if (current->thread.used_spe)
810 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
811
812 /* Always get SPEFSCR back */
813 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
814 return 1;
815#endif /* CONFIG_SPE */
816
Stephen Rothwell81e70092005-10-18 11:17:58 +1000817 return 0;
818}
819
Michael Neuling2b0a5762013-02-13 16:21:41 +0000820#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
821/*
822 * Restore the current user register values from the user stack, except for
823 * MSR, and recheckpoint the original checkpointed register state for processes
824 * in transactions.
825 */
826static long restore_tm_user_regs(struct pt_regs *regs,
827 struct mcontext __user *sr,
828 struct mcontext __user *tm_sr)
829{
830 long err;
831 unsigned long msr;
832#ifdef CONFIG_VSX
833 int i;
834#endif
835
836 /*
837 * restore general registers but not including MSR or SOFTE. Also
838 * take care of keeping r2 (TLS) intact if not a signal.
839 * See comment in signal_64.c:restore_tm_sigcontexts();
840 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
841 * were set by the signal delivery.
842 */
843 err = restore_general_regs(regs, tm_sr);
844 err |= restore_general_regs(&current->thread.ckpt_regs, sr);
845
846 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
847
848 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
849 if (err)
850 return 1;
851
852 /* Restore the previous little-endian mode */
853 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
854
855 /*
856 * Do this before updating the thread state in
857 * current->thread.fpr/vr/evr. That way, if we get preempted
858 * and another task grabs the FPU/Altivec/SPE, it won't be
859 * tempted to save the current CPU state into the thread_struct
860 * and corrupt what we are writing there.
861 */
862 discard_lazy_cpu_state();
863
864#ifdef CONFIG_ALTIVEC
865 regs->msr &= ~MSR_VEC;
866 if (msr & MSR_VEC) {
867 /* restore altivec registers from the stack */
868 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
869 sizeof(sr->mc_vregs)) ||
870 __copy_from_user(current->thread.transact_vr,
871 &tm_sr->mc_vregs,
872 sizeof(sr->mc_vregs)))
873 return 1;
874 } else if (current->thread.used_vr) {
875 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
876 memset(current->thread.transact_vr, 0,
877 ELF_NVRREG * sizeof(vector128));
878 }
879
880 /* Always get VRSAVE back */
881 if (__get_user(current->thread.vrsave,
882 (u32 __user *)&sr->mc_vregs[32]) ||
883 __get_user(current->thread.transact_vrsave,
884 (u32 __user *)&tm_sr->mc_vregs[32]))
885 return 1;
886#endif /* CONFIG_ALTIVEC */
887
888 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
889
890 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
891 copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
892 return 1;
893
894#ifdef CONFIG_VSX
895 regs->msr &= ~MSR_VSX;
896 if (msr & MSR_VSX) {
897 /*
898 * Restore altivec registers from the stack to a local
899 * buffer, then write this out to the thread_struct
900 */
901 if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
902 copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
903 return 1;
904 } else if (current->thread.used_vsr)
905 for (i = 0; i < 32 ; i++) {
906 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
907 current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
908 }
909#endif /* CONFIG_VSX */
910
911#ifdef CONFIG_SPE
912 /* SPE regs are not checkpointed with TM, so this section is
913 * simply the same as in restore_user_regs().
914 */
915 regs->msr &= ~MSR_SPE;
916 if (msr & MSR_SPE) {
917 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
918 ELF_NEVRREG * sizeof(u32)))
919 return 1;
920 } else if (current->thread.used_spe)
921 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
922
923 /* Always get SPEFSCR back */
924 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
925 + ELF_NEVRREG))
926 return 1;
927#endif /* CONFIG_SPE */
928
929 /* Now, recheckpoint. This loads up all of the checkpointed (older)
930 * registers, including FP and V[S]Rs. After recheckpointing, the
931 * transactional versions should be loaded.
932 */
933 tm_enable();
934 /* This loads the checkpointed FP/VEC state, if used */
935 tm_recheckpoint(&current->thread, msr);
936 /* The task has moved into TM state S, so ensure MSR reflects this */
937 regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
938
939 /* This loads the speculative FP/VEC state, if used */
940 if (msr & MSR_FP) {
941 do_load_up_transact_fpu(&current->thread);
942 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
943 }
944 if (msr & MSR_VEC) {
945 do_load_up_transact_altivec(&current->thread);
946 regs->msr |= MSR_VEC;
947 }
948
949 return 0;
950}
951#endif
952
Stephen Rothwell81e70092005-10-18 11:17:58 +1000953#ifdef CONFIG_PPC64
Stephen Rothwellb09a4912005-10-18 14:51:57 +1000954long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
Stephen Rothwell81e70092005-10-18 11:17:58 +1000955 struct sigaction32 __user *oact, size_t sigsetsize)
956{
957 struct k_sigaction new_ka, old_ka;
958 int ret;
959
960 /* XXX: Don't preclude handling different sized sigset_t's. */
961 if (sigsetsize != sizeof(compat_sigset_t))
962 return -EINVAL;
963
964 if (act) {
965 compat_uptr_t handler;
966
967 ret = get_user(handler, &act->sa_handler);
968 new_ka.sa.sa_handler = compat_ptr(handler);
969 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
970 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
971 if (ret)
972 return -EFAULT;
973 }
974
975 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
976 if (!ret && oact) {
Al Viro29e646d2006-02-01 05:28:09 -0500977 ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
Stephen Rothwell81e70092005-10-18 11:17:58 +1000978 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
979 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
980 }
981 return ret;
982}
983
984/*
985 * Note: it is necessary to treat how as an unsigned int, with the
 986 * corresponding cast to a signed int to ensure that the proper
987 * conversion (sign extension) between the register representation
988 * of a signed int (msr in 32-bit mode) and the register representation
989 * of a signed int (msr in 64-bit mode) is performed.
990 */
Stephen Rothwellb09a4912005-10-18 14:51:57 +1000991long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
Stephen Rothwell81e70092005-10-18 11:17:58 +1000992 compat_sigset_t __user *oset, size_t sigsetsize)
993{
994 sigset_t s;
995 sigset_t __user *up;
996 int ret;
997 mm_segment_t old_fs = get_fs();
998
999 if (set) {
1000 if (get_sigset_t(&s, set))
1001 return -EFAULT;
1002 }
1003
1004 set_fs(KERNEL_DS);
1005 /* This is valid because of the set_fs() */
1006 up = (sigset_t __user *) &s;
1007 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
1008 sigsetsize);
1009 set_fs(old_fs);
1010 if (ret)
1011 return ret;
1012 if (oset) {
1013 if (put_sigset_t(oset, &s))
1014 return -EFAULT;
1015 }
1016 return 0;
1017}
1018
Stephen Rothwellb09a4912005-10-18 14:51:57 +10001019long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001020{
1021 sigset_t s;
1022 int ret;
1023 mm_segment_t old_fs = get_fs();
1024
1025 set_fs(KERNEL_DS);
1026 /* The __user pointer cast is valid because of the set_fs() */
1027 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
1028 set_fs(old_fs);
1029 if (!ret) {
1030 if (put_sigset_t(set, &s))
1031 return -EFAULT;
1032 }
1033 return ret;
1034}
1035
1036
1037int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
1038{
1039 int err;
1040
1041 if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
1042 return -EFAULT;
1043
 1044 /* If you change the siginfo_t structure, please be sure
1045 * this code is fixed accordingly.
1046 * It should never copy any pad contained in the structure
1047 * to avoid security leaks, but must copy the generic
1048 * 3 ints plus the relevant union member.
1049 * This routine must convert siginfo from 64bit to 32bit as well
1050 * at the same time.
1051 */
1052 err = __put_user(s->si_signo, &d->si_signo);
1053 err |= __put_user(s->si_errno, &d->si_errno);
1054 err |= __put_user((short)s->si_code, &d->si_code);
1055 if (s->si_code < 0)
1056 err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
1057 SI_PAD_SIZE32);
1058 else switch(s->si_code >> 16) {
1059 case __SI_CHLD >> 16:
1060 err |= __put_user(s->si_pid, &d->si_pid);
1061 err |= __put_user(s->si_uid, &d->si_uid);
1062 err |= __put_user(s->si_utime, &d->si_utime);
1063 err |= __put_user(s->si_stime, &d->si_stime);
1064 err |= __put_user(s->si_status, &d->si_status);
1065 break;
1066 case __SI_FAULT >> 16:
1067 err |= __put_user((unsigned int)(unsigned long)s->si_addr,
1068 &d->si_addr);
1069 break;
1070 case __SI_POLL >> 16:
1071 err |= __put_user(s->si_band, &d->si_band);
1072 err |= __put_user(s->si_fd, &d->si_fd);
1073 break;
1074 case __SI_TIMER >> 16:
1075 err |= __put_user(s->si_tid, &d->si_tid);
1076 err |= __put_user(s->si_overrun, &d->si_overrun);
1077 err |= __put_user(s->si_int, &d->si_int);
1078 break;
1079 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
1080 case __SI_MESGQ >> 16:
1081 err |= __put_user(s->si_int, &d->si_int);
1082 /* fallthrough */
1083 case __SI_KILL >> 16:
1084 default:
1085 err |= __put_user(s->si_pid, &d->si_pid);
1086 err |= __put_user(s->si_uid, &d->si_uid);
1087 break;
1088 }
1089 return err;
1090}
1091
1092#define copy_siginfo_to_user copy_siginfo_to_user32
1093
Roland McGrath9c0c44d2008-04-20 08:19:24 +10001094int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
1095{
1096 memset(to, 0, sizeof *to);
1097
1098 if (copy_from_user(to, from, 3*sizeof(int)) ||
1099 copy_from_user(to->_sifields._pad,
1100 from->_sifields._pad, SI_PAD_SIZE32))
1101 return -EFAULT;
1102
1103 return 0;
1104}
1105
Stephen Rothwell81e70092005-10-18 11:17:58 +10001106/*
1107 * Note: it is necessary to treat pid and sig as unsigned ints, with the
1108 * corresponding cast to a signed int to insure that the proper conversion
1109 * (sign extension) between the register representation of a signed int
1110 * (msr in 32-bit mode) and the register representation of a signed int
1111 * (msr in 64-bit mode) is performed.
1112 */
Stephen Rothwellb09a4912005-10-18 14:51:57 +10001113long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001114{
1115 siginfo_t info;
1116 int ret;
1117 mm_segment_t old_fs = get_fs();
1118
Roland McGrath9c0c44d2008-04-20 08:19:24 +10001119 ret = copy_siginfo_from_user32(&info, uinfo);
1120 if (unlikely(ret))
1121 return ret;
1122
Stephen Rothwell81e70092005-10-18 11:17:58 +10001123 set_fs (KERNEL_DS);
 1124 /* The __user pointer cast is valid because of the set_fs() */
1125 ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
1126 set_fs (old_fs);
1127 return ret;
1128}
1129/*
1130 * Start Alternate signal stack support
1131 *
1132 * System Calls
Stephen Rothwellb09a4912005-10-18 14:51:57 +10001133 * sigaltstack compat_sys_sigaltstack
Stephen Rothwell81e70092005-10-18 11:17:58 +10001134 */
1135
Stephen Rothwellb09a4912005-10-18 14:51:57 +10001136int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
Stephen Rothwell81e70092005-10-18 11:17:58 +10001137 int r6, int r7, int r8, struct pt_regs *regs)
1138{
Al Viro29e646d2006-02-01 05:28:09 -05001139 stack_32_t __user * newstack = compat_ptr(__new);
1140 stack_32_t __user * oldstack = compat_ptr(__old);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001141 stack_t uss, uoss;
1142 int ret;
1143 mm_segment_t old_fs;
1144 unsigned long sp;
1145 compat_uptr_t ss_sp;
1146
1147 /*
1148 * set sp to the user stack on entry to the system call
1149 * the system call router sets R9 to the saved registers
1150 */
1151 sp = regs->gpr[1];
1152
1153 /* Put new stack info in local 64 bit stack struct */
1154 if (newstack) {
1155 if (get_user(ss_sp, &newstack->ss_sp) ||
1156 __get_user(uss.ss_flags, &newstack->ss_flags) ||
1157 __get_user(uss.ss_size, &newstack->ss_size))
1158 return -EFAULT;
1159 uss.ss_sp = compat_ptr(ss_sp);
1160 }
1161
1162 old_fs = get_fs();
1163 set_fs(KERNEL_DS);
1164 /* The __user pointer casts are valid because of the set_fs() */
1165 ret = do_sigaltstack(
1166 newstack ? (stack_t __user *) &uss : NULL,
1167 oldstack ? (stack_t __user *) &uoss : NULL,
1168 sp);
1169 set_fs(old_fs);
1170 /* Copy the stack information to the user output buffer */
1171 if (!ret && oldstack &&
Al Viro29e646d2006-02-01 05:28:09 -05001172 (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
Stephen Rothwell81e70092005-10-18 11:17:58 +10001173 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
1174 __put_user(uoss.ss_size, &oldstack->ss_size)))
1175 return -EFAULT;
1176 return ret;
1177}
1178#endif /* CONFIG_PPC64 */
1179
Stephen Rothwell81e70092005-10-18 11:17:58 +10001180/*
1181 * Set up a signal frame for a "real-time" signal handler
1182 * (one which gets siginfo).
1183 */
Christoph Hellwigf478f542007-06-04 15:15:52 +10001184int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
Stephen Rothwell81e70092005-10-18 11:17:58 +10001185 siginfo_t *info, sigset_t *oldset,
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001186 struct pt_regs *regs)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001187{
1188 struct rt_sigframe __user *rt_sf;
1189 struct mcontext __user *frame;
Olof Johanssond0c3d532007-10-12 10:20:07 +10001190 void __user *addr;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001191 unsigned long newsp = 0;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001192 int sigret;
1193 unsigned long tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001194
1195 /* Set up Signal Frame */
1196 /* Put a Real Time Context onto stack */
Josh Boyerefbda862009-03-25 06:23:59 +00001197 rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001198 addr = rt_sf;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001199 if (unlikely(rt_sf == NULL))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001200 goto badframe;
1201
1202 /* Put the siginfo & fill in most of the ucontext */
1203 if (copy_siginfo_to_user(&rt_sf->info, info)
1204 || __put_user(0, &rt_sf->uc.uc_flags)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001205 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
1206 || __put_user(sas_ss_flags(regs->gpr[1]),
1207 &rt_sf->uc.uc_stack.ss_flags)
1208 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
1209 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
1210 &rt_sf->uc.uc_regs)
1211 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
1212 goto badframe;
1213
1214 /* Save user registers on the stack */
1215 frame = &rt_sf->uc.uc_mcontext;
Olof Johanssond0c3d532007-10-12 10:20:07 +10001216 addr = frame;
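 /*
 * Prefer the signal trampoline in the VDSO; in that case sigret stays 0 and
 * nothing is written to the frame's tramp words. Otherwise fall back to the
 * on-stack trampoline and pass the sigreturn syscall number down to
 * save_user_regs()/save_tm_user_regs().
 */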
Benjamin Herrenschmidta5bba932006-05-30 13:51:37 +10001217 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001218 sigret = 0;
1219 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +11001220 } else {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001221 sigret = __NR_rt_sigreturn;
1222 tramp = (unsigned long) frame->tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001223 }
Paul Mackerrascc657f52005-11-14 21:55:15 +11001224
Michael Neuling2b0a5762013-02-13 16:21:41 +00001225#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1226 if (MSR_TM_ACTIVE(regs->msr)) {
1227 if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
1228 &rt_sf->uc_transact.uc_mcontext, sigret))
1229 goto badframe;
1230 }
1231 else
1232#endif
1233 if (save_user_regs(regs, frame, sigret, 1))
1234 goto badframe;
1235 regs->link = tramp;
1236
1237#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1238 if (MSR_TM_ACTIVE(regs->msr)) {
1239 if (__put_user((unsigned long)&rt_sf->uc_transact,
1240 &rt_sf->uc.uc_link)
1241 || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
1242 &rt_sf->uc_transact.uc_regs))
1243 goto badframe;
1244 }
1245 else
1246#endif
1247 if (__put_user(0, &rt_sf->uc.uc_link))
1248 goto badframe;
1249
Paul Mackerrascc657f52005-11-14 21:55:15 +11001250 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
1251
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001252 /* create a stack frame for the caller of the handler */
1253 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001254 addr = (void __user *)regs->gpr[1];
Paul Mackerrase2b55302005-10-22 14:46:33 +10001255 if (put_user(regs->gpr[1], (u32 __user *)newsp))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001256 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001257
1258 /* Fill registers for signal handler */
Stephen Rothwell81e70092005-10-18 11:17:58 +10001259 regs->gpr[1] = newsp;
1260 regs->gpr[3] = sig;
1261 regs->gpr[4] = (unsigned long) &rt_sf->info;
1262 regs->gpr[5] = (unsigned long) &rt_sf->uc;
1263 regs->gpr[6] = (unsigned long) rt_sf;
1264 regs->nip = (unsigned long) ka->sa.sa_handler;
Paul Mackerrasfab5db92006-06-07 16:14:40 +10001265 /* enter the signal handler in big-endian mode */
1266 regs->msr &= ~MSR_LE;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001267#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1268 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
1269 * just indicates to userland that we were doing a transaction, but we
1270 * don't want to return in transactional state:
1271 */
1272 regs->msr &= ~MSR_TS_MASK;
1273#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001274 return 1;
1275
1276badframe:
1277#ifdef DEBUG_SIG
1278 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
1279 regs, frame, newsp);
1280#endif
Christian Dietrich76462232011-06-04 05:36:54 +00001281 if (show_unhandled_signals)
1282 printk_ratelimited(KERN_INFO
1283 "%s[%d]: bad frame in handle_rt_signal32: "
1284 "%p nip %08lx lr %08lx\n",
1285 current->comm, current->pid,
1286 addr, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001287
Stephen Rothwell81e70092005-10-18 11:17:58 +10001288 force_sigsegv(sig, current);
1289 return 0;
1290}
1291
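/*
 * Install the blocked signal mask and the register state described by a
 * user ucontext; shared by the swapcontext and rt_sigreturn paths.
 */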
1292static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
1293{
1294 sigset_t set;
1295 struct mcontext __user *mcp;
1296
1297 if (get_sigset_t(&set, &ucp->uc_sigmask))
1298 return -EFAULT;
1299#ifdef CONFIG_PPC64
1300 {
1301 u32 cmcp;
1302
1303 if (__get_user(cmcp, &ucp->uc_regs))
1304 return -EFAULT;
1305 mcp = (struct mcontext __user *)(u64)cmcp;
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001306 /* no need to check access_ok(mcp), since mcp < 4GB */
Stephen Rothwell81e70092005-10-18 11:17:58 +10001307 }
1308#else
1309 if (__get_user(mcp, &ucp->uc_regs))
1310 return -EFAULT;
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001311 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
1312 return -EFAULT;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001313#endif
Al Viro17440f12012-04-27 14:09:19 -04001314 set_current_blocked(&set);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001315 if (restore_user_regs(regs, mcp, sig))
1316 return -EFAULT;
1317
1318 return 0;
1319}
1320
Michael Neuling2b0a5762013-02-13 16:21:41 +00001321#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1322static int do_setcontext_tm(struct ucontext __user *ucp,
1323 struct ucontext __user *tm_ucp,
1324 struct pt_regs *regs)
1325{
1326 sigset_t set;
1327 struct mcontext __user *mcp;
1328 struct mcontext __user *tm_mcp;
1329 u32 cmcp;
1330 u32 tm_cmcp;
1331
1332 if (get_sigset_t(&set, &ucp->uc_sigmask))
1333 return -EFAULT;
1334
1335 if (__get_user(cmcp, &ucp->uc_regs) ||
1336 __get_user(tm_cmcp, &tm_ucp->uc_regs))
1337 return -EFAULT;
1338 mcp = (struct mcontext __user *)(u64)cmcp;
1339 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1340 /* no need to check access_ok(mcp), since mcp < 4GB */
1341
1342 set_current_blocked(&set);
1343 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1344 return -EFAULT;
1345
1346 return 0;
1347}
1348#endif
1349
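/*
 * swapcontext: optionally save the current user context into old_ctx and/or
 * switch to new_ctx. On 64-bit kernels, ctx_size distinguishes contexts laid
 * out with and without the trailing VSX region.
 */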
Stephen Rothwell81e70092005-10-18 11:17:58 +10001350long sys_swapcontext(struct ucontext __user *old_ctx,
Paul Mackerras1bd79332006-03-08 13:24:22 +11001351 struct ucontext __user *new_ctx,
1352 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001353{
1354 unsigned char tmp;
Michael Neuling16c29d12008-10-23 00:42:36 +00001355 int ctx_has_vsx_region = 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001356
Michael Neulingc1cb2992008-07-08 18:43:41 +10001357#ifdef CONFIG_PPC64
1358 unsigned long new_msr = 0;
1359
Andreas Schwab77eb50a2008-11-06 00:49:00 +00001360 if (new_ctx) {
1361 struct mcontext __user *mcp;
1362 u32 cmcp;
1363
1364 /*
1365 * Get pointer to the real mcontext. No need for
1366 * access_ok since we are dealing with compat
1367 * pointers.
1368 */
1369 if (__get_user(cmcp, &new_ctx->uc_regs))
1370 return -EFAULT;
1371 mcp = (struct mcontext __user *)(u64)cmcp;
1372 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1373 return -EFAULT;
1374 }
Michael Neulingc1cb2992008-07-08 18:43:41 +10001375 /*
1376 * Check that the context is not smaller than the original
1377 * size (with VMX but without VSX)
1378 */
1379 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1380 return -EINVAL;
1381 /*
 1382 * If the new context sets the MSR VSX bit but is too small to
 1383 * carry the VSX state, reject it.
1384 */
1385 if ((ctx_size < sizeof(struct ucontext)) &&
1386 (new_msr & MSR_VSX))
1387 return -EINVAL;
Michael Neuling16c29d12008-10-23 00:42:36 +00001388 /* Does the context have enough room to store VSX data? */
1389 if (ctx_size >= sizeof(struct ucontext))
1390 ctx_has_vsx_region = 1;
Michael Neulingc1cb2992008-07-08 18:43:41 +10001391#else
Stephen Rothwell81e70092005-10-18 11:17:58 +10001392 /* Context size is for future use. Right now, we only make sure
1393 * we are passed something we understand
1394 */
1395 if (ctx_size < sizeof(struct ucontext))
1396 return -EINVAL;
Michael Neulingc1cb2992008-07-08 18:43:41 +10001397#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001398 if (old_ctx != NULL) {
Paul Mackerras1c9bb1a2006-12-20 13:57:06 +11001399 struct mcontext __user *mctx;
1400
1401 /*
1402 * old_ctx might not be 16-byte aligned, in which
1403 * case old_ctx->uc_mcontext won't be either.
1404 * Because we have the old_ctx->uc_pad2 field
1405 * before old_ctx->uc_mcontext, we need to round down
1406 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1407 */
1408 mctx = (struct mcontext __user *)
1409 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
Michael Neuling16c29d12008-10-23 00:42:36 +00001410 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1411 || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001412 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
Paul Mackerras1c9bb1a2006-12-20 13:57:06 +11001413 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001414 return -EFAULT;
1415 }
1416 if (new_ctx == NULL)
1417 return 0;
Michael Neuling16c29d12008-10-23 00:42:36 +00001418 if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001419 || __get_user(tmp, (u8 __user *) new_ctx)
Michael Neuling16c29d12008-10-23 00:42:36 +00001420 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001421 return -EFAULT;
1422
1423 /*
1424 * If we get a fault copying the context into the kernel's
1425 * image of the user's registers, we can't just return -EFAULT
1426 * because the user's registers will be corrupted. For instance
1427 * the NIP value may have been updated but not some of the
1428 * other registers. Given that we have done the access_ok
1429 * and successfully read the first and last bytes of the region
1430 * above, this should only happen in an out-of-memory situation
1431 * or if another thread unmaps the region containing the context.
1432 * We kill the task with a SIGSEGV in this situation.
1433 */
1434 if (do_setcontext(new_ctx, regs, 0))
1435 do_exit(SIGSEGV);
David Woodhouse401d1f02005-11-15 18:52:18 +00001436
1437 set_thread_flag(TIF_RESTOREALL);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001438 return 0;
1439}
1440
1441long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1442 struct pt_regs *regs)
1443{
1444 struct rt_sigframe __user *rt_sf;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001445#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1446 struct ucontext __user *uc_transact;
1447 unsigned long msr_hi;
1448 unsigned long tmp;
1449 int tm_restore = 0;
1450#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001451 /* Always make any pending restarted system calls return -EINTR */
1452 current_thread_info()->restart_block.fn = do_no_restart_syscall;
1453
1454 rt_sf = (struct rt_sigframe __user *)
1455 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1456 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1457 goto bad;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001458#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1459 if (__get_user(tmp, &rt_sf->uc.uc_link))
1460 goto bad;
1461 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1462 if (uc_transact) {
1463 u32 cmcp;
1464 struct mcontext __user *mcp;
1465
1466 if (__get_user(cmcp, &uc_transact->uc_regs))
1467 return -EFAULT;
1468 mcp = (struct mcontext __user *)(u64)cmcp;
1469 /* The top 32 bits of the MSR are stashed in the transactional
1470 * ucontext. */
1471 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1472 goto bad;
1473
1474 if (MSR_TM_SUSPENDED(msr_hi<<32)) {
1475 /* We only recheckpoint on return if we're
 1476 * in a transaction.
1477 */
1478 tm_restore = 1;
1479 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1480 goto bad;
1481 }
1482 }
1483 if (!tm_restore)
1484 /* Fall through, for non-TM restore */
1485#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001486 if (do_setcontext(&rt_sf->uc, regs, 1))
1487 goto bad;
1488
1489 /*
1490 * It's not clear whether or why it is desirable to save the
1491 * sigaltstack setting on signal delivery and restore it on
1492 * signal return. But other architectures do this and we have
1493 * always done it up until now so it is probably better not to
1494 * change it. -- paulus
1495 */
1496#ifdef CONFIG_PPC64
1497 /*
Stephen Rothwellb09a4912005-10-18 14:51:57 +10001498	 * We use the compat_sys_ version that does the 32/64-bit conversion
Stephen Rothwell81e70092005-10-18 11:17:58 +10001499	 * and takes a userland pointer directly. What about error checking?
1500	 * Nobody does any...
1501 */
Stephen Rothwellb09a4912005-10-18 14:51:57 +10001502 compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001503#else
1504 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001505#endif
David Woodhouse401d1f02005-11-15 18:52:18 +00001506 set_thread_flag(TIF_RESTOREALL);
1507 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001508
1509 bad:
Christian Dietrich76462232011-06-04 05:36:54 +00001510 if (show_unhandled_signals)
1511 printk_ratelimited(KERN_INFO
1512 "%s[%d]: bad frame in sys_rt_sigreturn: "
1513 "%p nip %08lx lr %08lx\n",
1514 current->comm, current->pid,
1515 rt_sf, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001516
Stephen Rothwell81e70092005-10-18 11:17:58 +10001517 force_sig(SIGSEGV, current);
1518 return 0;
1519}
1520
1521#ifdef CONFIG_PPC32
1522int sys_debug_setcontext(struct ucontext __user *ctx,
1523 int ndbg, struct sig_dbg_op __user *dbg,
1524 int r6, int r7, int r8,
1525 struct pt_regs *regs)
1526{
1527 struct sig_dbg_op op;
1528 int i;
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001529 unsigned char tmp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001530 unsigned long new_msr = regs->msr;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001531#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Stephen Rothwell81e70092005-10-18 11:17:58 +10001532 unsigned long new_dbcr0 = current->thread.dbcr0;
1533#endif
1534
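	/* First pass: validate each sig_dbg_op and work out the new MSR
	 * (and DBCR0 on CPUs with dedicated debug registers) without
	 * touching the live thread state yet. */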
1535 for (i=0; i<ndbg; i++) {
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001536 if (copy_from_user(&op, dbg + i, sizeof(op)))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001537 return -EFAULT;
1538 switch (op.dbg_type) {
1539 case SIG_DBG_SINGLE_STEPPING:
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001540#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Stephen Rothwell81e70092005-10-18 11:17:58 +10001541 if (op.dbg_value) {
1542 new_msr |= MSR_DE;
1543 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1544 } else {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00001545 new_dbcr0 &= ~DBCR0_IC;
1546 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1547 current->thread.dbcr1)) {
1548 new_msr &= ~MSR_DE;
1549 new_dbcr0 &= ~DBCR0_IDM;
1550 }
Stephen Rothwell81e70092005-10-18 11:17:58 +10001551 }
1552#else
1553 if (op.dbg_value)
1554 new_msr |= MSR_SE;
1555 else
1556 new_msr &= ~MSR_SE;
1557#endif
1558 break;
1559 case SIG_DBG_BRANCH_TRACING:
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001560#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Stephen Rothwell81e70092005-10-18 11:17:58 +10001561 return -EINVAL;
1562#else
1563 if (op.dbg_value)
1564 new_msr |= MSR_BE;
1565 else
1566 new_msr &= ~MSR_BE;
1567#endif
1568 break;
1569
1570 default:
1571 return -EINVAL;
1572 }
1573 }
1574
1575	/* We wait until here to actually install the values in the
1576	   registers so that a failure in the loop above does not
1577	   affect their contents. After this point a failure is a
1578	   problem anyway, and it's very unlikely unless the user is
1579	   really doing something wrong. */
1580 regs->msr = new_msr;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001581#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Stephen Rothwell81e70092005-10-18 11:17:58 +10001582 current->thread.dbcr0 = new_dbcr0;
1583#endif
1584
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001585 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1586 || __get_user(tmp, (u8 __user *) ctx)
1587 || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1588 return -EFAULT;
1589
Stephen Rothwell81e70092005-10-18 11:17:58 +10001590 /*
1591 * If we get a fault copying the context into the kernel's
1592 * image of the user's registers, we can't just return -EFAULT
1593 * because the user's registers will be corrupted. For instance
1594 * the NIP value may have been updated but not some of the
1595 * other registers. Given that we have done the access_ok
1596 * and successfully read the first and last bytes of the region
1597 * above, this should only happen in an out-of-memory situation
1598 * or if another thread unmaps the region containing the context.
1599 * We kill the task with a SIGSEGV in this situation.
1600 */
1601 if (do_setcontext(ctx, regs, 1)) {
Christian Dietrich76462232011-06-04 05:36:54 +00001602 if (show_unhandled_signals)
1603 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1604 "sys_debug_setcontext: %p nip %08lx "
1605 "lr %08lx\n",
1606 current->comm, current->pid,
1607 ctx, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001608
Stephen Rothwell81e70092005-10-18 11:17:58 +10001609 force_sig(SIGSEGV, current);
1610 goto out;
1611 }
1612
1613 /*
1614 * It's not clear whether or why it is desirable to save the
1615 * sigaltstack setting on signal delivery and restore it on
1616 * signal return. But other architectures do this and we have
1617 * always done it up until now so it is probably better not to
1618 * change it. -- paulus
1619 */
1620 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1621
David Woodhouse401d1f02005-11-15 18:52:18 +00001622 set_thread_flag(TIF_RESTOREALL);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001623 out:
1624 return 0;
1625}
1626#endif
1627
1628/*
1629 * OK, we're invoking a handler
1630 */
Christoph Hellwigf478f542007-06-04 15:15:52 +10001631int handle_signal32(unsigned long sig, struct k_sigaction *ka,
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001632 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001633{
1634 struct sigcontext __user *sc;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001635 struct sigframe __user *frame;
1636 unsigned long newsp = 0;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001637 int sigret;
1638 unsigned long tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001639
1640 /* Set up Signal Frame */
Josh Boyerefbda862009-03-25 06:23:59 +00001641 frame = get_sigframe(ka, regs, sizeof(*frame), 1);
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001642 if (unlikely(frame == NULL))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001643 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001644 sc = (struct sigcontext __user *) &frame->sctx;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001645
1646#if _NSIG != 64
1647#error "Please adjust handle_signal()"
1648#endif
1649 if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1650 || __put_user(oldset->sig[0], &sc->oldmask)
1651#ifdef CONFIG_PPC64
1652 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1653#else
1654 || __put_user(oldset->sig[1], &sc->_unused[3])
1655#endif
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001656 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001657 || __put_user(sig, &sc->signal))
1658 goto badframe;
1659
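	/* Use the signal trampoline in the vDSO if one is mapped;
	 * otherwise fall back to the trampoline that save_user_regs()
	 * writes into the frame (sigret selects the sigreturn syscall). */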
Benjamin Herrenschmidta5bba932006-05-30 13:51:37 +10001660 if (vdso32_sigtramp && current->mm->context.vdso_base) {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001661 sigret = 0;
1662 tramp = current->mm->context.vdso_base + vdso32_sigtramp;
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +11001663 } else {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001664 sigret = __NR_sigreturn;
1665 tramp = (unsigned long) frame->mctx.tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001666 }
1667
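	/* With an active transaction, both the checkpointed and the
	 * transactional register state are saved into the frame's two
	 * mcontexts. */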
Michael Neuling2b0a5762013-02-13 16:21:41 +00001668#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1669 if (MSR_TM_ACTIVE(regs->msr)) {
1670 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1671 sigret))
1672 goto badframe;
1673 }
1674 else
1675#endif
1676 if (save_user_regs(regs, &frame->mctx, sigret, 1))
1677 goto badframe;
1678
1679 regs->link = tramp;
1680
Paul Mackerrascc657f52005-11-14 21:55:15 +11001681 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
1682
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001683 /* create a stack frame for the caller of the handler */
1684 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001685 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1686 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001687
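	/* Enter the handler: r1 points at the new stack frame, r3 carries
	 * the signal number, r4 the sigcontext pointer, and NIP the
	 * handler itself. */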
Stephen Rothwell81e70092005-10-18 11:17:58 +10001688 regs->gpr[1] = newsp;
1689 regs->gpr[3] = sig;
1690 regs->gpr[4] = (unsigned long) sc;
1691 regs->nip = (unsigned long) ka->sa.sa_handler;
Paul Mackerrasfab5db92006-06-07 16:14:40 +10001692 /* enter the signal handler in big-endian mode */
1693 regs->msr &= ~MSR_LE;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001694#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1695 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
1696 * just indicates to userland that we were doing a transaction, but we
1697 * don't want to return in transactional state:
1697	 * don't want to return in transactional state.
1699 regs->msr &= ~MSR_TS_MASK;
1700#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001701 return 1;
1702
1703badframe:
1704#ifdef DEBUG_SIG
1705 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1706 regs, frame, newsp);
1707#endif
Christian Dietrich76462232011-06-04 05:36:54 +00001708 if (show_unhandled_signals)
1709 printk_ratelimited(KERN_INFO
1710 "%s[%d]: bad frame in handle_signal32: "
1711 "%p nip %08lx lr %08lx\n",
1712 current->comm, current->pid,
1713 frame, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001714
Stephen Rothwell81e70092005-10-18 11:17:58 +10001715 force_sigsegv(sig, current);
1716 return 0;
1717}
1718
1719/*
1720 * Do a signal return; undo the signal stack.
1721 */
1722long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1723 struct pt_regs *regs)
1724{
1725 struct sigcontext __user *sc;
1726 struct sigcontext sigctx;
1727 struct mcontext __user *sr;
Olof Johanssond0c3d532007-10-12 10:20:07 +10001728 void __user *addr;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001729 sigset_t set;
1730
1731 /* Always make any pending restarted system calls return -EINTR */
1732 current_thread_info()->restart_block.fn = do_no_restart_syscall;
1733
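	/* The sigcontext set up by handle_signal32 sits __SIGNAL_FRAMESIZE
	 * bytes above the handler's stack pointer. */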
1734 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001735 addr = sc;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001736 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1737 goto badframe;
1738
1739#ifdef CONFIG_PPC64
1740 /*
1741 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1742 * unused part of the signal stackframe
1743 */
1744 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1745#else
1746 set.sig[0] = sigctx.oldmask;
1747 set.sig[1] = sigctx._unused[3];
1748#endif
Al Viro17440f12012-04-27 14:09:19 -04001749 set_current_blocked(&set);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001750
1751 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001752 addr = sr;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001753 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1754 || restore_user_regs(regs, sr, 1))
1755 goto badframe;
1756
David Woodhouse401d1f02005-11-15 18:52:18 +00001757 set_thread_flag(TIF_RESTOREALL);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001758 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001759
1760badframe:
Christian Dietrich76462232011-06-04 05:36:54 +00001761 if (show_unhandled_signals)
1762 printk_ratelimited(KERN_INFO
1763 "%s[%d]: bad frame in sys_sigreturn: "
1764 "%p nip %08lx lr %08lx\n",
1765 current->comm, current->pid,
1766 addr, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001767
Stephen Rothwell81e70092005-10-18 11:17:58 +10001768 force_sig(SIGSEGV, current);
1769 return 0;
1770}