/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_sigsuspend	compat_sys_sigsuspend
#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_sigaction	compat_sys_sigaction
#define sys_swapcontext	compat_sys_swapcontext
#define sys_sigreturn	compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}
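
/*
 * Illustrative example of the split performed above: a 64-bit sig[0] of
 * 0x0000000100000400 is stored in 32-bit user memory as sig[0] = 0x00000400
 * (low word) and sig[1] = 0x00000001 (high word), and is reassembled the
 * same way by get_sigset_t().
 */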

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (get_user(handler, &act->sa_handler) ||
	    __get_user(restorer, &act->sa_restorer) ||
	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
	    __get_user(mask, &act->sa_mask))
		return -EFAULT;
	new_ka->sa.sa_handler = compat_ptr(handler);
	new_ka->sa.sa_restorer = compat_ptr(restorer);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
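
/*
 * Note: the MSR slot is deliberately skipped above; restore_user_regs()
 * reads it separately and only honours selected bits.  PT_SOFTE holds the
 * 64-bit soft-interrupt-enable state and must never be overwritten from
 * user memory.
 */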

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	old_sigset_t mask;

	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
			__get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
			__get_user(mask, &act->sa_mask))
		return -EFAULT;
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}

#endif /* CONFIG_PPC64 */

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
long sys_sigsuspend(old_sigset_t mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}

long sys_sigaction(int sig, struct old_sigaction __user *act,
		struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

#ifdef CONFIG_PPC64
	if (sig < 0)
		sig = -sig;
#endif

	if (act) {
		if (get_old_sigaction(&new_ka, act))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
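
/*
 * Illustrative layout of the resulting frame (higher addresses first):
 *
 *	original r1
 *	abigap[56]	space the interrupted code may have used below its sp
 *	mctx		saved register state (including the mc_pad trampoline)
 *	sctx		the sigcontext passed to the handler
 *	gap of __SIGNAL_FRAMESIZE bytes
 *	new r1
 */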

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NFPREG];
	int i;

	/* gather the FPRs and FPSCR into a local buffer, then write it to user memory */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
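
/*
 * In the layout used above, buf[0..ELF_NFPREG-2] hold the FPRs and the
 * final slot, buf[ELF_NFPREG-1], carries the FPSCR; copy_fpr_from_user()
 * below relies on the same layout.
 */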

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	/* gather the VSX state (one doubleword per VSR) into a local buffer, then write it to user memory */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fpr, from,
			      ELF_NFPREG * sizeof(double));
}
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
		int sigret, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vsregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
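		/*
		 * 0x38000000 + sigret encodes "li r0,sigret" (addi r0,0,sigret)
		 * and 0x44000002 encodes "sc": a two-instruction stub that
		 * issues the sigreturn system call.
		 */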
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC64
long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
		struct sigaction32 __user *oact, size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}
	return ret;
}

/*
 * Note: it is necessary to treat how as an unsigned int, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the register representation
 * of a signed int (msr in 32-bit mode) and the register representation
 * of a signed int (msr in 64-bit mode) is performed.
 */
long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
		compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	sigset_t __user *up;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (get_sigset_t(&s, set))
			return -EFAULT;
	}

	set_fs(KERNEL_DS);
	/* This is valid because of the set_fs() */
	up = (sigset_t __user *) &s;
	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
				 sigsetsize);
	set_fs(old_fs);
	if (ret)
		return ret;
	if (oset) {
		if (put_sigset_t(oset, &s))
			return -EFAULT;
	}
	return 0;
}

long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
{
	sigset_t s;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		if (put_sigset_t(set, &s))
			return -EFAULT;
	}
	return ret;
}


int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
	int err;

	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(s->si_code >> 16) {
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	memset(to, 0, sizeof *to);

	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;

	return 0;
}

/*
 * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding cast to a signed int to ensure that the proper conversion
 * (sign extension) between the register representation of a signed int
 * (msr in 32-bit mode) and the register representation of a signed int
 * (msr in 64-bit mode) is performed.
 */
long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;

	set_fs (KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
/*
 *  Start Alternate signal stack support
 *
 *  System Calls
 *       sigaltstack              compat_sys_sigaltstack
 */

int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
		      int r6, int r7, int r8, struct pt_regs *regs)
{
	stack_32_t __user * newstack = compat_ptr(__new);
	stack_32_t __user * oldstack = compat_ptr(__old);
	stack_t uss, uoss;
	int ret;
	mm_segment_t old_fs;
	unsigned long sp;
	compat_uptr_t ss_sp;

	/*
	 * set sp to the user stack on entry to the system call
	 * the system call router sets R9 to the saved registers
	 */
	sp = regs->gpr[1];

	/* Put new stack info in local 64 bit stack struct */
	if (newstack) {
		if (get_user(ss_sp, &newstack->ss_sp) ||
		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
		    __get_user(uss.ss_size, &newstack->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ss_sp);
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* The __user pointer casts are valid because of the set_fs() */
	ret = do_sigaltstack(
		newstack ? (stack_t __user *) &uss : NULL,
		oldstack ? (stack_t __user *) &uoss : NULL,
		sp);
	set_fs(old_fs);
	/* Copy the stack information to the user output buffer */
	if (!ret && oldstack &&
		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
		 __put_user(uoss.ss_size, &oldstack->ss_size)))
		return -EFAULT;
	return ret;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	void __user *addr;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, frame, 0, 1))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
			goto badframe;
		regs->link = (unsigned long) frame->tramp;
	}
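	/*
	 * When the vdso trampoline is available we return through it
	 * (sigret == 0 above), so save_user_regs() does not have to write
	 * and icache-flush an on-stack stub.
	 */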

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bits but
	 * doesn't provide any VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
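	/*
	 * The extra 16 bytes match the gap left by handle_rt_signal32(), so
	 * the siginfo and ucontext are found where older kernels placed them.
	 */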
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	/*
	 * We use the compat_sys_ version that does the 32/64 bits conversion
	 * and takes userland pointer directly. What about error checking?
	 * nobody does any...
	 */
	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
#else
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, &frame->mctx, 0, 1))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
			goto badframe;
		regs->link = (unsigned long) frame->mctx.tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;

	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   frame, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
	addr = sr;
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}