blob: 418776c41d24a0d061b8545bd7b54896edba1e1f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
14#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070023#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070024#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090025#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070026#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080027#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080028#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080029#include <linux/pid_namespace.h>
30#include <linux/nsproxy.h>
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050031#define CREATE_TRACE_POINTS
32#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080033
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <asm/param.h>
35#include <asm/uaccess.h>
36#include <asm/unistd.h>
37#include <asm/siginfo.h>
Al Viroe1396062006-05-25 10:19:47 -040038#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/*
41 * SLAB caches for signal bits.
42 */
43
Christoph Lametere18b8902006-12-06 20:33:20 -080044static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090046int print_fatal_signals __read_mostly;
47
/* Return the userspace handler currently installed for @sig in @t's sighand. */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070052
/*
 * Return nonzero if @handler means @sig is ignored: either explicitly
 * (SIG_IGN) or implicitly (SIG_DFL and the default action is to ignore).
 */
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
/*
 * Would task @t ignore @sig based solely on its installed handler?
 * @from_ancestor_ns: signal originates from an ancestor pid namespace.
 */
static int sig_task_ignored(struct task_struct *t, int sig,
				int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/*
	 * An unkillable task (e.g. a pid-namespace init) treats default-
	 * action signals from within its own namespace as ignored; signals
	 * sent from an ancestor namespace are still considered.
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}
73
/*
 * Should @sig be dropped for @t at generation time?  Returns nonzero only
 * when the signal is not blocked, is ignored by the handler, and no tracer
 * wants to see it anyway.
 */
static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
92
93/*
94 * Re-calculate pending state from the set of locally pending
95 * signals, globally pending signals, and blocked signals.
96 */
97static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98{
99 unsigned long ready;
100 long i;
101
102 switch (_NSIG_WORDS) {
103 default:
104 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 ready |= signal->sig[i] &~ blocked->sig[i];
106 break;
107
108 case 4: ready = signal->sig[3] &~ blocked->sig[3];
109 ready |= signal->sig[2] &~ blocked->sig[2];
110 ready |= signal->sig[1] &~ blocked->sig[1];
111 ready |= signal->sig[0] &~ blocked->sig[0];
112 break;
113
114 case 2: ready = signal->sig[1] &~ blocked->sig[1];
115 ready |= signal->sig[0] &~ blocked->sig[0];
116 break;
117
118 case 1: ready = signal->sig[0] &~ blocked->sig[0];
119 }
120 return ready != 0;
121}
122
123#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
/*
 * Recompute whether @t has work pending: a group stop or any unblocked
 * pending signal (private or shared).  Sets TIF_SIGPENDING and returns 1
 * if so; returns 0 otherwise without touching the flag.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->group_stop & GROUP_STOP_PENDING) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
140
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
150
/*
 * Recompute TIF_SIGPENDING for current.  This is the only place the flag
 * is cleared (safe because current cannot be mid-restart here); a tracer
 * can force it on via tracehook_force_sigpending(), and a task being
 * frozen keeps the flag so it notices the freeze request.
 */
void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
159
160/* Given the mask, find the first available signal that should be serviced. */
161
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800162#define SYNCHRONOUS_MASK \
163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 sigmask(SIGTRAP) | sigmask(SIGFPE))
165
/*
 * Given the mask, find the first available signal that should be serviced.
 * Synchronous signals (faults) in the first word are dequeued before any
 * other signal.  Returns the signal number, or 0 if none is pending.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		/* ffz(~x) is the index of the lowest set bit in x */
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
211
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900212static inline void print_dropped_signal(int sig)
213{
214 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216 if (!print_fatal_signals)
217 return;
218
219 if (!__ratelimit(&ratelimit_state))
220 return;
221
222 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 current->comm, current->pid, sig);
224}
225
/**
 * task_clear_group_stop_pending - clear pending group stop
 * @task: target task
 *
 * Clear group stop states for @task.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_group_stop_pending(struct task_struct *task)
{
	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
}
239
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if this consumption completed the group stop, %false otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->group_stop & GROUP_STOP_CONSUME;

	/* Callers guarantee a stop is actually pending on @task. */
	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));

	task_clear_group_stop_pending(task);

	if (!consume)
		return false;

	/* Underflow would indicate a bookkeeping bug; don't go negative. */
	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	if (!sig->group_stop_count) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
273
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	/* Charge the allocation against RLIMIT_SIGPENDING unless overridden. */
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* Allocation failed or limit hit: undo the accounting. */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;	/* q now owns the uid reference taken above */
	}

	return q;
}
313
/*
 * Release a sigqueue entry: drop the per-user accounting and return it to
 * the slab.  Preallocated entries belong to their creator and are skipped.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
322
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800323void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324{
325 struct sigqueue *q;
326
327 sigemptyset(&queue->signal);
328 while (!list_empty(&queue->list)) {
329 q = list_entry(queue->list.next, struct sigqueue , list);
330 list_del_init(&q->list);
331 __sigqueue_free(q);
332 }
333}
334
/*
 * Flush all pending signals for a task.
 * Caller must hold t->sighand->siglock.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}
344
/* Locked wrapper: take siglock, then flush all pending signals of @t. */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
353
/*
 * Remove queued SI_TIMER entries from @pending while keeping every other
 * queued signal intact.  Caller must hold the siglock.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* Not a timer signal: remember it must stay pending. */
			sigaddset(&retain, sig);
		} else {
			/* Timer signal: drop both the bit and the entry. */
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* Pending = signals kept above, plus any bit not fully removed. */
	sigorsets(&pending->signal, &signal, &retain);
}
376
/* Flush SI_TIMER signals from current's private and shared pending queues. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
387
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700388void ignore_signals(struct task_struct *t)
389{
390 int i;
391
392 for (i = 0; i < _NSIG; ++i)
393 t->sighand->action[i].sa.sa_handler = SIG_IGN;
394
395 flush_signals(t);
396}
397
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399 * Flush all handlers for a task.
400 */
401
402void
403flush_signal_handlers(struct task_struct *t, int force_default)
404{
405 int i;
406 struct k_sigaction *ka = &t->sighand->action[0];
407 for (i = _NSIG ; i != 0 ; i--) {
408 if (force_default || ka->sa.sa_handler != SIG_IGN)
409 ka->sa.sa_handler = SIG_DFL;
410 ka->sa.sa_flags = 0;
411 sigemptyset(&ka->sa.sa_mask);
412 ka++;
413 }
414}
415
/*
 * Return nonzero if @sig is effectively unhandled for @tsk: no user
 * handler is installed and no tracer cares to intercept it.  Global init
 * is always reported as unhandled.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}
425
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
446
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	/* Re-evaluate TIF_SIGPENDING now that the notifier is gone. */
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
460
/*
 * Dequeue the siginfo for @sig from @list into @info.  If more than one
 * entry for @sig is queued, the bit stays set in the pending mask (the
 * still_pending path); otherwise the bit is cleared.  If no entry exists,
 * a minimal SI_USER info is synthesized.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
496
/*
 * Dequeue the next serviceable signal (per @mask) from @pending into
 * @info.  An installed block_all_signals() notifier may veto delivery,
 * in which case 0 is returned and TIF_SIGPENDING is cleared.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				/* Notifier returning 0 blocks the signal. */
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
517
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		/* Nothing private: try the process-wide shared queue. */
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
592
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	/* If @t was already running, force a reschedule on its CPU so it
	 * notices TIF_SIGPENDING promptly. */
	if (!wake_up_state(t, mask))
		kick_process(t);
}
623
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;	/* nothing in @mask is pending */

	/* Clear the bits first, then drop the matching queued entries. */
	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		/* Only signals below SIGRTMIN fit in an unsigned long mask. */
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
674
/*
 * SEND_SIG_NOINFO/SEND_SIG_PRIV/SEND_SIG_FORCED are small sentinel
 * "pointers"; any real siginfo pointer compares greater than
 * SEND_SIG_FORCED.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
679
/* Did this siginfo originate from userspace (and thus need perm checks)? */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
685
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred, *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-generated signals bypass the permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	cred = current_cred();
	tcred = __task_cred(t);
	/*
	 * XOR is zero on equality: the sender needs some matching
	 * (e)uid/(s)uid pair with the target, or CAP_KILL, unless both
	 * tasks are in the same thread group.
	 */
	if (!same_thread_group(current, t) &&
	    (cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid ^ tcred->suid) &&
	    (cred->uid ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			/* SIGCONT is permitted within the same session. */
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
731
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;

			/* SIGCONT cancels any in-progress group stop. */
			task_clear_group_stop_pending(t);

			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
831
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700832/*
833 * Test if P wants to take SIG. After we've checked all threads with this,
834 * it's equivalent to finding no threads not blocking SIG. Any threads not
835 * blocking SIG were ruled out because they are not running and already
836 * have pending signals. Such threads will dequeue from the shared queue
837 * as soon as they're available, so putting the signal on the shared queue
838 * will be equivalent to sending it to one such thread.
839 */
840static inline int wants_signal(int sig, struct task_struct *p)
841{
842 if (sigismember(&p->blocked, sig))
843 return 0;
844 if (p->flags & PF_EXITING)
845 return 0;
846 if (sig == SIGKILL)
847 return 1;
848 if (task_is_stopped_or_traced(p))
849 return 0;
850 return task_curr(p) || !signal_pending(p);
851}
852
/*
 * A signal for @p (thread-private, or shared when @group) has just been
 * queued; pick a thread to wake so the signal actually gets dequeued,
 * and short-circuit whole-group death for fatal signals.
 *
 * Caller holds p->sighand->siglock (the queueing paths assert it).
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates so delivery is spread round-robin
		 * over willing threads; a full cycle means nobody wants it.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 * Coredumping signals are excluded: the dumper needs the
		 * threads intact, so they take the slow path instead.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_group_stop_pending(t);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 * (resume == 1 only for SIGKILL: it may wake a stopped thread.)
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
929
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -0700930static inline int legacy_queue(struct sigpending *signals, int sig)
931{
932 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
933}
934
/*
 * Core signal-queueing path.  Queues @sig (with @info) on @t's private
 * pending set, or on the thread group's shared set when @group.
 * @from_ancestor_ns: sender lives in an ancestor pid namespace, so the
 * sender pid is meaningless to the target and gets cleared.
 *
 * Returns 0 on success (including "dropped as ignored/duplicate"),
 * -EAGAIN only for a genuine rt-signal queue overflow.
 * Caller must hold t->sighand->siglock (asserted below).
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	/* Side effects (SIGCONT/stop handling) and the ignored check. */
	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL: no siginfo allocation at all.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	/*
	 * Legacy signals sent by the kernel or by kill() (si_code >= 0)
	 * may exceed the pending-signal rlimit; rt signals never may.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Synthesize or copy the siginfo depending on the sentinel. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* Sender pid is not visible in the target's ns. */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
1027
/*
 * Wrapper around __send_signal() that works out whether the sender is
 * in an ancestor pid namespace relative to the target @t.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int cross_ns = 0;

#ifdef CONFIG_PID_NS
	/*
	 * A user-space sender whose pid is not visible in the target's
	 * namespace must be from an ancestor pid namespace.
	 */
	if (si_fromuser(info) &&
	    !task_pid_nr_ns(current, task_active_pid_ns(t)))
		cross_ns = 1;
#endif

	return __send_signal(sig, info, t, group, cross_ns);
}
1040
/*
 * Log a fatal signal delivery: task name/pid and the signal number, plus
 * (on native i386 only) a hex dump of up to 16 code bytes at the faulting
 * instruction pointer, followed by a full register dump.
 */
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* User text may be unmapped; stop at the first fault. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	/* Keep the register dump on one CPU's output, uninterrupted. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1064
1065static int __init setup_print_fatal_signals(char *str)
1066{
1067 get_option (&str, &print_fatal_signals);
1068
1069 return 1;
1070}
1071
1072__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073
/*
 * Queue @sig with @info on @p's thread group (shared pending set).
 * Caller must hold p->sighand->siglock, as required by send_signal().
 */
int __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1 /* group */);
}
1079
/*
 * Queue @sig with @info on thread @t's private pending set only.
 * Caller must hold t->sighand->siglock, as required by send_signal().
 */
static int specific_send_sig_info(int sig, struct siginfo *info,
				  struct task_struct *t)
{
	return send_signal(sig, info, t, 0 /* !group */);
}
1085
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001086int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1087 bool group)
1088{
1089 unsigned long flags;
1090 int ret = -ESRCH;
1091
1092 if (lock_task_sighand(p, &flags)) {
1093 ret = send_signal(sig, info, p, group);
1094 unlock_task_sighand(p, &flags);
1095 }
1096
1097 return ret;
1098}
1099
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* Reset to default so the forced signal actually fires. */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			/* Re-evaluate TIF_SIGPENDING now that the mask changed. */
			recalc_sigpending_and_wake(t);
		}
	}
	/* A default-action signal must be able to kill even "unkillable" tasks. */
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	/* Deliver privately to @t while still holding siglock. */
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1136
/*
 * Nuke all other threads in the group.
 *
 * Sends SIGKILL to every thread in @p's group except @p itself and
 * cancels any group stop in progress.  Returns the number of other
 * threads iterated (dead ones included in the count, but not signalled).
 *
 * NOTE(review): callers appear responsible for holding the appropriate
 * lock around the thread-list walk — confirm against call sites.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_group_stop_pending(t);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1160
/*
 * Lock @tsk's sighand->siglock, guarding against the sighand being
 * detached/replaced concurrently.  Returns the locked sighand (with
 * *flags saved for the irqrestore), or NULL if the task no longer
 * has one (it is exiting).
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		/*
		 * tsk->sighand may have changed between the dereference
		 * and taking the lock; if so, drop it and retry with the
		 * current pointer.
		 */
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
1181
David Howellsc69e8d92008-11-14 10:39:19 +11001182/*
1183 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001184 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1186{
David Howells694f6902010-08-04 16:59:14 +01001187 int ret;
1188
1189 rcu_read_lock();
1190 ret = check_kill_permission(sig, info, p);
1191 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001193 if (!ret && sig)
1194 ret = do_send_sig_info(sig, info, p, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
1196 return ret;
1197}
1198
1199/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001200 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001202 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 */
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001204int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205{
1206 struct task_struct *p = NULL;
1207 int retval, success;
1208
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 success = 0;
1210 retval = -ESRCH;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001211 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 int err = group_send_sig_info(sig, info, p);
1213 success |= !err;
1214 retval = err;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001215 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 return success ? 0 : retval;
1217}
1218
/*
 * Send @sig/@info to the thread group led by the task attached to @pid.
 * Returns 0 on success, or the delivery/permission error; -ESRCH if no
 * live task is (or stays) attached to @pid.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
1242
Matthew Wilcoxc3de4b32007-02-09 08:11:47 -07001243int
1244kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001245{
1246 int error;
1247 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001248 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001249 rcu_read_unlock();
1250 return error;
1251}
1252
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
/*
 * Permission is granted for a user-space-originated siginfo only when
 * the supplied @uid or @euid matches the target's real or saved uid,
 * and the LSM (via @secid) agrees.  @sig == 0 probes without delivery.
 */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* Target creds are RCU-protected; we hold rcu_read_lock here. */
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid != pcred->suid && uid != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			/* from_ancestor_ns == 0: pid was resolved by caller. */
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 *
 * pid > 0  : that process;  pid == 0 : caller's process group;
 * pid < -1 : process group -pid;  pid == -1 : every process the caller
 * may signal, except pid 1 and the caller's own group.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* Skip init (vnr 1) and our own thread group. */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				/* -EPERM on some targets does not fail the call. */
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1336
1337/*
1338 * These are for backward compatibility with the rest of the kernel source.
1339 */
1340
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341int
1342send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1343{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 /*
1345 * Make sure legacy kernel users don't send in bad values
1346 * (normal paths check this in check_kill_permission).
1347 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001348 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 return -EINVAL;
1350
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001351 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352}
1353
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001354#define __si_special(priv) \
1355 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1356
/*
 * Convenience wrapper: @priv selects a kernel-internal (SEND_SIG_PRIV)
 * versus info-less (SEND_SIG_NOINFO) delivery.
 */
int send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *si = __si_special(priv);

	return send_sig_info(sig, si, p);
}
1362
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363void
1364force_sig(int sig, struct task_struct *p)
1365{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001366 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367}
1368
1369/*
1370 * When things go south during signal handling, we
1371 * will force a SIGSEGV. And if the signal that caused
1372 * the problem was already a SIGSEGV, we'll want to
1373 * make sure we don't even try to deliver the signal..
1374 */
1375int
1376force_sigsegv(int sig, struct task_struct *p)
1377{
1378 if (sig == SIGSEGV) {
1379 unsigned long flags;
1380 spin_lock_irqsave(&p->sighand->siglock, flags);
1381 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1382 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1383 }
1384 force_sig(SIGSEGV, p);
1385 return 0;
1386}
1387
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001388int kill_pgrp(struct pid *pid, int sig, int priv)
1389{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001390 int ret;
1391
1392 read_lock(&tasklist_lock);
1393 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1394 read_unlock(&tasklist_lock);
1395
1396 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001397}
1398EXPORT_SYMBOL(kill_pgrp);
1399
/* Signal the thread group identified by @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1405
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406/*
1407 * These functions support sending signals using preallocated sigqueue
1408 * structures. This is needed "because realtime applications cannot
1409 * afford to lose notifications of asynchronous events, like timer
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001410 * expirations or I/O completions". In the case of Posix Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 * we allocate the sigqueue structure from the timer_create. If this
1412 * allocation fails we are able to report the failure to the application
1413 * with an EAGAIN error.
1414 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415struct sigqueue *sigqueue_alloc(void)
1416{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001417 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001419 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001421
1422 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423}
1424
/*
 * Release a preallocated sigqueue entry.  If it is still queued on some
 * pending list, only the PREALLOC flag is dropped here and the entry is
 * freed later by whoever dequeues it; otherwise it is freed immediately.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1449
/*
 * Deliver a preallocated sigqueue entry (posix-timer path) to @t,
 * group-wide when @group.
 *
 * Returns 0 when queued, 1 when the signal is ignored, and -1 when the
 * target has no sighand left to lock.  If the entry is already queued
 * (an SI_TIMER firing again before dequeue) only si_overrun is bumped.
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Fresh delivery: reset the overrun accounting. */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1489
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 * Let a parent know about the death of a child.
1492 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001493 *
1494 * Returns -1 if our parent ignored us and so we've switched to
1495 * self-reaping, or else @sig.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 */
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001497int do_notify_parent(struct task_struct *tsk, int sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
1499 struct siginfo info;
1500 unsigned long flags;
1501 struct sighand_struct *psig;
Roland McGrath1b046242008-08-19 20:37:07 -07001502 int ret = sig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503
1504 BUG_ON(sig == -1);
1505
1506 /* do_notify_parent_cldstop should have been called instead. */
Matthew Wilcoxe1abb392007-12-06 11:07:35 -05001507 BUG_ON(task_is_stopped_or_traced(tsk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001509 BUG_ON(!task_ptrace(tsk) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1511
1512 info.si_signo = sig;
1513 info.si_errno = 0;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001514 /*
1515 * we are under tasklist_lock here so our parent is tied to
1516 * us and cannot exit and release its namespace.
1517 *
1518 * the only it can is to switch its nsproxy with sys_unshare,
1519 * bu uncharing pid namespaces is not allowed, so we'll always
1520 * see relevant namespace
1521 *
1522 * write_lock() currently calls preempt_disable() which is the
1523 * same as rcu_read_lock(), but according to Oleg, this is not
1524 * correct to rely on this
1525 */
1526 rcu_read_lock();
1527 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
David Howellsc69e8d92008-11-14 10:39:19 +11001528 info.si_uid = __task_cred(tsk)->uid;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001529 rcu_read_unlock();
1530
Peter Zijlstra32bd6712009-02-05 12:24:15 +01001531 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1532 tsk->signal->utime));
1533 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1534 tsk->signal->stime));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535
1536 info.si_status = tsk->exit_code & 0x7f;
1537 if (tsk->exit_code & 0x80)
1538 info.si_code = CLD_DUMPED;
1539 else if (tsk->exit_code & 0x7f)
1540 info.si_code = CLD_KILLED;
1541 else {
1542 info.si_code = CLD_EXITED;
1543 info.si_status = tsk->exit_code >> 8;
1544 }
1545
1546 psig = tsk->parent->sighand;
1547 spin_lock_irqsave(&psig->siglock, flags);
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001548 if (!task_ptrace(tsk) && sig == SIGCHLD &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1550 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1551 /*
1552 * We are exiting and our parent doesn't care. POSIX.1
1553 * defines special semantics for setting SIGCHLD to SIG_IGN
1554 * or setting the SA_NOCLDWAIT flag: we should be reaped
1555 * automatically and not left for our parent's wait4 call.
1556 * Rather than having the parent do it as a magic kind of
1557 * signal handler, we just set this to tell do_exit that we
1558 * can be cleaned up without becoming a zombie. Note that
1559 * we still call __wake_up_parent in this case, because a
1560 * blocked sys_wait4 might now return -ECHILD.
1561 *
1562 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1563 * is implementation-defined: we do (if you don't want
1564 * it, just use SIG_IGN instead).
1565 */
Roland McGrath1b046242008-08-19 20:37:07 -07001566 ret = tsk->exit_signal = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001568 sig = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 }
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001570 if (valid_signal(sig) && sig > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 __group_send_sig_info(sig, &info, tsk->parent);
1572 __wake_up_parent(tsk, tsk->parent);
1573 spin_unlock_irqrestore(&psig->siglock, flags);
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001574
Roland McGrath1b046242008-08-19 20:37:07 -07001575 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576}
1577
Oleg Nesterova1d5e212006-03-28 16:11:29 -08001578static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579{
1580 struct siginfo info;
1581 unsigned long flags;
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001582 struct task_struct *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 struct sighand_struct *sighand;
1584
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001585 if (task_ptrace(tsk))
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001586 parent = tsk->parent;
1587 else {
1588 tsk = tsk->group_leader;
1589 parent = tsk->real_parent;
1590 }
1591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 info.si_signo = SIGCHLD;
1593 info.si_errno = 0;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001594 /*
1595 * see comment in do_notify_parent() abot the following 3 lines
1596 */
1597 rcu_read_lock();
Oleg Nesterovd9265662009-06-17 16:27:35 -07001598 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
David Howellsc69e8d92008-11-14 10:39:19 +11001599 info.si_uid = __task_cred(tsk)->uid;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001600 rcu_read_unlock();
1601
Michael Kerriskd8878ba2008-07-25 01:47:32 -07001602 info.si_utime = cputime_to_clock_t(tsk->utime);
1603 info.si_stime = cputime_to_clock_t(tsk->stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
1605 info.si_code = why;
1606 switch (why) {
1607 case CLD_CONTINUED:
1608 info.si_status = SIGCONT;
1609 break;
1610 case CLD_STOPPED:
1611 info.si_status = tsk->signal->group_exit_code & 0x7f;
1612 break;
1613 case CLD_TRAPPED:
1614 info.si_status = tsk->exit_code & 0x7f;
1615 break;
1616 default:
1617 BUG();
1618 }
1619
1620 sighand = parent->sighand;
1621 spin_lock_irqsave(&sighand->siglock, flags);
1622 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1623 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1624 __group_send_sig_info(SIGCHLD, &info, parent);
1625 /*
1626 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1627 */
1628 __wake_up_parent(tsk, parent);
1629 spin_unlock_irqrestore(&sighand->siglock, flags);
1630}
1631
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001632static inline int may_ptrace_stop(void)
1633{
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001634 if (!likely(task_ptrace(current)))
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001635 return 0;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001636 /*
1637 * Are we in the middle of do_coredump?
1638 * If so and our tracer is also part of the coredump stopping
1639 * is a deadlock situation, and pointless because our tracer
1640 * is dead so don't allow us to stop.
1641 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001642 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001643 * is safe to enter schedule().
1644 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001645 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001646 unlikely(current->mm == current->parent->mm))
1647 return 0;
1648
1649 return 1;
1650}
1651
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652/*
Roland McGrath1a669c22008-02-06 01:37:37 -08001653 * Return nonzero if there is a SIGKILL that should be waking us up.
1654 * Called with the siglock held.
1655 */
1656static int sigkill_pending(struct task_struct *tsk)
1657{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001658 return sigismember(&tsk->pending.signal, SIGKILL) ||
1659 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001660}
1661
1662/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 * This must be called with current->sighand->siglock held.
1664 *
1665 * This should be the path for all ptrace stops.
1666 * We always set current->last_siginfo while stopped here.
1667 * That makes it a way to test a stopped process for
1668 * being ptrace-stopped vs being job-control-stopped.
1669 *
Oleg Nesterov20686a32008-02-08 04:19:03 -08001670 * If we actually decide not to stop at all because the tracer
1671 * is gone, we keep current->exit_code unless clear_code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 */
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001673static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
Namhyung Kimb8401152010-10-27 15:34:07 -07001674 __releases(&current->sighand->siglock)
1675 __acquires(&current->sighand->siglock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676{
Roland McGrath1a669c22008-02-06 01:37:37 -08001677 if (arch_ptrace_stop_needed(exit_code, info)) {
1678 /*
1679 * The arch code has something special to do before a
1680 * ptrace stop. This is allowed to block, e.g. for faults
1681 * on user stack pages. We can't keep the siglock while
1682 * calling arch_ptrace_stop, so we must release it now.
1683 * To preserve proper semantics, we must do this before
1684 * any signal bookkeeping like checking group_stop_count.
1685 * Meanwhile, a SIGKILL could come in before we retake the
1686 * siglock. That must prevent us from sleeping in TASK_TRACED.
1687 * So after regaining the lock, we must check for SIGKILL.
1688 */
1689 spin_unlock_irq(&current->sighand->siglock);
1690 arch_ptrace_stop(exit_code, info);
1691 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001692 if (sigkill_pending(current))
1693 return;
Roland McGrath1a669c22008-02-06 01:37:37 -08001694 }
1695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 /*
Tejun Heo0ae8ce12011-03-23 10:37:00 +01001697 * If @why is CLD_STOPPED, we're trapping to participate in a group
1698 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
1699 * while siglock was released for the arch hook, PENDING could be
1700 * clear now. We act as if SIGCONT is received after TASK_TRACED
1701 * is entered - ignore it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 */
Tejun Heo0ae8ce12011-03-23 10:37:00 +01001703 if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
Tejun Heoe5c19022011-03-23 10:37:00 +01001704 task_participate_group_stop(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705
1706 current->last_siginfo = info;
1707 current->exit_code = exit_code;
1708
1709 /* Let the debugger run. */
Oleg Nesterovd9ae90a2008-02-06 01:36:13 -08001710 __set_current_state(TASK_TRACED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 spin_unlock_irq(&current->sighand->siglock);
1712 read_lock(&tasklist_lock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001713 if (may_ptrace_stop()) {
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001714 do_notify_parent_cldstop(current, why);
Miklos Szeredi53da1d92009-03-23 16:07:24 +01001715 /*
1716 * Don't want to allow preemption here, because
1717 * sys_ptrace() needs this task to be inactive.
1718 *
1719 * XXX: implement read_unlock_no_resched().
1720 */
1721 preempt_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 read_unlock(&tasklist_lock);
Miklos Szeredi53da1d92009-03-23 16:07:24 +01001723 preempt_enable_no_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 schedule();
1725 } else {
1726 /*
1727 * By the time we got the lock, our tracer went away.
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08001728 * Don't drop the lock yet, another tracer may come.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 */
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08001730 __set_current_state(TASK_RUNNING);
Oleg Nesterov20686a32008-02-08 04:19:03 -08001731 if (clear_code)
1732 current->exit_code = 0;
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08001733 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 }
1735
1736 /*
Roland McGrath13b1c3d2008-03-03 20:22:05 -08001737 * While in TASK_TRACED, we were considered "frozen enough".
1738 * Now that we woke up, it's crucial if we're supposed to be
1739 * frozen that we freeze now before running anything substantial.
1740 */
1741 try_to_freeze();
1742
1743 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 * We are back. Now reacquire the siglock before touching
1745 * last_siginfo, so that we are sure to have synchronized with
1746 * any signal-sending on another CPU that wants to examine it.
1747 */
1748 spin_lock_irq(&current->sighand->siglock);
1749 current->last_siginfo = NULL;
1750
1751 /*
1752 * Queued signals ignored us while we were stopped for tracing.
1753 * So check for any that we should take before resuming user mode.
Roland McGrathb74d0de2007-06-06 03:59:00 -07001754 * This sets TIF_SIGPENDING, but never clears it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 */
Roland McGrathb74d0de2007-06-06 03:59:00 -07001756 recalc_sigpending_tsk(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757}
1758
/*
 * Report a ptrace event to the tracer and block until it lets us
 * continue.  @exit_code must carry SIGTRAP in its low signal bits
 * (enforced by the BUG_ON below).
 */
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	/* memset also clears any padding in the siginfo union. */
	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
1776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777/*
1778 * This performs the stopping for SIGSTOP and other stop signals.
1779 * We have to stop all threads in the thread group.
1780 * Returns nonzero if we've actually stopped and released the siglock.
1781 * Returns zero if we didn't stop and still hold the siglock.
1782 */
Oleg Nesterova122b342006-03-28 16:11:22 -08001783static int do_signal_stop(int signr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784{
1785 struct signal_struct *sig = current->signal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
Tejun Heo39efa3e2011-03-23 10:37:00 +01001787 if (!(current->group_stop & GROUP_STOP_PENDING)) {
1788 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
Oleg Nesterovf558b7e2008-02-04 22:27:24 -08001789 struct task_struct *t;
1790
Oleg Nesterov2b201a92008-07-25 01:47:31 -07001791 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
Oleg Nesterov573cf9a2008-04-30 00:52:36 -07001792 unlikely(signal_group_exit(sig)))
Oleg Nesterovf558b7e2008-02-04 22:27:24 -08001793 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 * There is no group stop already in progress.
Oleg Nesterova122b342006-03-28 16:11:22 -08001796 * We must initiate one now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 */
Oleg Nesterova122b342006-03-28 16:11:22 -08001798 sig->group_exit_code = signr;
1799
Tejun Heoe5c19022011-03-23 10:37:00 +01001800 current->group_stop = gstop;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001801 sig->group_stop_count = 1;
Oleg Nesterova122b342006-03-28 16:11:22 -08001802 for (t = next_thread(current); t != current; t = next_thread(t))
1803 /*
1804 * Setting state to TASK_STOPPED for a group
1805 * stop is always done with the siglock held,
1806 * so this check has no races.
1807 */
Tejun Heo39efa3e2011-03-23 10:37:00 +01001808 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
Tejun Heoe5c19022011-03-23 10:37:00 +01001809 t->group_stop = gstop;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001810 sig->group_stop_count++;
Oleg Nesterova122b342006-03-28 16:11:22 -08001811 signal_wake_up(t, 0);
Tejun Heoe5c19022011-03-23 10:37:00 +01001812 } else
1813 task_clear_group_stop_pending(t);
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001814 }
Tejun Heoedf2ed12011-03-23 10:37:00 +01001815
1816 current->exit_code = sig->group_exit_code;
1817 __set_current_state(TASK_STOPPED);
1818
Tejun Heo5224fa32011-03-23 10:37:00 +01001819 if (likely(!task_ptrace(current))) {
1820 int notify = 0;
1821
1822 /*
1823 * If there are no other threads in the group, or if there
1824 * is a group stop in progress and we are the last to stop,
1825 * report to the parent.
1826 */
1827 if (task_participate_group_stop(current))
1828 notify = CLD_STOPPED;
1829
1830 spin_unlock_irq(&current->sighand->siglock);
1831
1832 if (notify) {
1833 read_lock(&tasklist_lock);
1834 do_notify_parent_cldstop(current, notify);
1835 read_unlock(&tasklist_lock);
1836 }
1837
1838 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1839 schedule();
1840
1841 spin_lock_irq(&current->sighand->siglock);
1842 } else
1843 ptrace_stop(current->exit_code, CLD_STOPPED, 0, NULL);
1844
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001845 spin_unlock_irq(&current->sighand->siglock);
1846
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001847 tracehook_finish_jctl();
1848 current->exit_code = 0;
1849
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 return 1;
1851}
1852
Roland McGrath18c98b62008-04-17 18:44:38 -07001853static int ptrace_signal(int signr, siginfo_t *info,
1854 struct pt_regs *regs, void *cookie)
1855{
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001856 if (!task_ptrace(current))
Roland McGrath18c98b62008-04-17 18:44:38 -07001857 return signr;
1858
1859 ptrace_signal_deliver(regs, cookie);
1860
1861 /* Let the debugger run. */
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001862 ptrace_stop(signr, CLD_TRAPPED, 0, info);
Roland McGrath18c98b62008-04-17 18:44:38 -07001863
1864 /* We're back. Did the debugger cancel the sig? */
1865 signr = current->exit_code;
1866 if (signr == 0)
1867 return signr;
1868
1869 current->exit_code = 0;
1870
1871 /* Update the siginfo structure if the signal has
1872 changed. If the debugger wanted something
1873 specific in the siginfo structure then it should
1874 have updated *info via PTRACE_SETSIGINFO. */
1875 if (signr != info->si_signo) {
1876 info->si_signo = signr;
1877 info->si_errno = 0;
1878 info->si_code = SI_USER;
1879 info->si_pid = task_pid_vnr(current->parent);
David Howellsc69e8d92008-11-14 10:39:19 +11001880 info->si_uid = task_uid(current->parent);
Roland McGrath18c98b62008-04-17 18:44:38 -07001881 }
1882
1883 /* If the (new) signal is now blocked, requeue it. */
1884 if (sigismember(&current->blocked, signr)) {
1885 specific_send_sig_info(signr, info, current);
1886 signr = 0;
1887 }
1888
1889 return signr;
1890}
1891
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1893 struct pt_regs *regs, void *cookie)
1894{
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07001895 struct sighand_struct *sighand = current->sighand;
1896 struct signal_struct *signal = current->signal;
1897 int signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
Roland McGrath13b1c3d2008-03-03 20:22:05 -08001899relock:
1900 /*
1901 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1902 * While in TASK_STOPPED, we were considered "frozen enough".
1903 * Now that we woke up, it's crucial if we're supposed to be
1904 * frozen that we freeze now before running anything substantial.
1905 */
Rafael J. Wysockifc558a72006-03-23 03:00:05 -08001906 try_to_freeze();
1907
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07001908 spin_lock_irq(&sighand->siglock);
Oleg Nesterov021e1ae2008-04-30 00:53:00 -07001909 /*
1910 * Every stopped thread goes here after wakeup. Check to see if
1911 * we should notify the parent, prepare_signal(SIGCONT) encodes
1912 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1913 */
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07001914 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
Tejun Heoc672af32011-03-23 10:36:59 +01001915 int why;
1916
1917 if (signal->flags & SIGNAL_CLD_CONTINUED)
1918 why = CLD_CONTINUED;
1919 else
1920 why = CLD_STOPPED;
1921
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07001922 signal->flags &= ~SIGNAL_CLD_MASK;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001923
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07001924 spin_unlock_irq(&sighand->siglock);
Oleg Nesterove4420552008-04-30 00:52:44 -07001925
Tejun Heoedf2ed12011-03-23 10:37:00 +01001926 read_lock(&tasklist_lock);
1927 do_notify_parent_cldstop(current->group_leader, why);
1928 read_unlock(&tasklist_lock);
Oleg Nesterove4420552008-04-30 00:52:44 -07001929 goto relock;
1930 }
1931
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 for (;;) {
1933 struct k_sigaction *ka;
Roland McGrath7bcf6a22008-07-25 19:45:53 -07001934 /*
1935 * Tracing can induce an artifical signal and choose sigaction.
1936 * The return value in @signr determines the default action,
1937 * but @info->si_signo is the signal number we will report.
1938 */
1939 signr = tracehook_get_signal(current, regs, info, return_ka);
1940 if (unlikely(signr < 0))
1941 goto relock;
1942 if (unlikely(signr != 0))
1943 ka = return_ka;
1944 else {
Tejun Heo39efa3e2011-03-23 10:37:00 +01001945 if (unlikely(current->group_stop &
1946 GROUP_STOP_PENDING) && do_signal_stop(0))
Oleg Nesterov1be53962009-12-15 16:47:26 -08001947 goto relock;
1948
Roland McGrath7bcf6a22008-07-25 19:45:53 -07001949 signr = dequeue_signal(current, &current->blocked,
1950 info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
Roland McGrath18c98b62008-04-17 18:44:38 -07001952 if (!signr)
Roland McGrath7bcf6a22008-07-25 19:45:53 -07001953 break; /* will return 0 */
1954
1955 if (signr != SIGKILL) {
1956 signr = ptrace_signal(signr, info,
1957 regs, cookie);
1958 if (!signr)
1959 continue;
1960 }
1961
1962 ka = &sighand->action[signr-1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 }
1964
Masami Hiramatsuf9d42572009-11-24 16:56:51 -05001965 /* Trace actually delivered signals. */
1966 trace_signal_deliver(signr, info, ka);
1967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1969 continue;
1970 if (ka->sa.sa_handler != SIG_DFL) {
1971 /* Run the handler. */
1972 *return_ka = *ka;
1973
1974 if (ka->sa.sa_flags & SA_ONESHOT)
1975 ka->sa.sa_handler = SIG_DFL;
1976
1977 break; /* will return non-zero "signr" value */
1978 }
1979
1980 /*
1981 * Now we are doing the default action for this signal.
1982 */
1983 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1984 continue;
1985
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08001986 /*
Sukadev Bhattiprolu0fbc26a2007-10-18 23:40:13 -07001987 * Global init gets no signals it doesn't want.
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07001988 * Container-init gets no signals it doesn't want from same
1989 * container.
1990 *
1991 * Note that if global/container-init sees a sig_kernel_only()
1992 * signal here, the signal must have been generated internally
1993 * or must have come from an ancestor namespace. In either
1994 * case, the signal cannot be dropped.
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08001995 */
Oleg Nesterovfae5fa42008-04-30 00:53:03 -07001996 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07001997 !sig_kernel_only(signr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 continue;
1999
2000 if (sig_kernel_stop(signr)) {
2001 /*
2002 * The default action is to stop all threads in
2003 * the thread group. The job control signals
2004 * do nothing in an orphaned pgrp, but SIGSTOP
2005 * always works. Note that siglock needs to be
2006 * dropped during the call to is_orphaned_pgrp()
2007 * because of lock ordering with tasklist_lock.
2008 * This allows an intervening SIGCONT to be posted.
2009 * We need to check for that and bail out if necessary.
2010 */
2011 if (signr != SIGSTOP) {
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002012 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
2014 /* signals can be posted during this window */
2015
Eric W. Biederman3e7cd6c2007-02-12 00:52:58 -08002016 if (is_current_pgrp_orphaned())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 goto relock;
2018
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002019 spin_lock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 }
2021
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002022 if (likely(do_signal_stop(info->si_signo))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 /* It released the siglock. */
2024 goto relock;
2025 }
2026
2027 /*
2028 * We didn't actually stop, due to a race
2029 * with SIGCONT or something like that.
2030 */
2031 continue;
2032 }
2033
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002034 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036 /*
2037 * Anything else is fatal, maybe with a core dump.
2038 */
2039 current->flags |= PF_SIGNALED;
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002040
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 if (sig_kernel_coredump(signr)) {
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002042 if (print_fatal_signals)
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002043 print_fatal_signal(regs, info->si_signo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 /*
2045 * If it was able to dump core, this kills all
2046 * other threads in the group and synchronizes with
2047 * their demise. If we lost the race with another
2048 * thread getting here, it set group_exit_code
2049 * first and our do_group_exit call below will use
2050 * that value and ignore the one we pass it.
2051 */
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002052 do_coredump(info->si_signo, info->si_signo, regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 }
2054
2055 /*
2056 * Death signals, no core dump.
2057 */
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002058 do_group_exit(info->si_signo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 /* NOTREACHED */
2060 }
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002061 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 return signr;
2063}
2064
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002065void exit_signals(struct task_struct *tsk)
2066{
2067 int group_stop = 0;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002068 struct task_struct *t;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002069
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002070 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2071 tsk->flags |= PF_EXITING;
2072 return;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002073 }
2074
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002075 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002076 /*
2077 * From now this task is not visible for group-wide signals,
2078 * see wants_signal(), do_signal_stop().
2079 */
2080 tsk->flags |= PF_EXITING;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002081 if (!signal_pending(tsk))
2082 goto out;
2083
2084 /* It could be that __group_complete_signal() choose us to
2085 * notify about group-wide signal. Another thread should be
2086 * woken now to take the signal since we will not.
2087 */
2088 for (t = tsk; (t = next_thread(t)) != tsk; )
2089 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2090 recalc_sigpending_and_wake(t);
2091
Tejun Heo39efa3e2011-03-23 10:37:00 +01002092 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
Tejun Heoe5c19022011-03-23 10:37:00 +01002093 task_participate_group_stop(tsk))
Tejun Heoedf2ed12011-03-23 10:37:00 +01002094 group_stop = CLD_STOPPED;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002095out:
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002096 spin_unlock_irq(&tsk->sighand->siglock);
2097
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002098 if (unlikely(group_stop)) {
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002099 read_lock(&tasklist_lock);
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002100 do_notify_parent_cldstop(tsk, group_stop);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002101 read_unlock(&tasklist_lock);
2102 }
2103}
2104
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105EXPORT_SYMBOL(recalc_sigpending);
2106EXPORT_SYMBOL_GPL(dequeue_signal);
2107EXPORT_SYMBOL(flush_signals);
2108EXPORT_SYMBOL(force_sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109EXPORT_SYMBOL(send_sig);
2110EXPORT_SYMBOL(send_sig_info);
2111EXPORT_SYMBOL(sigprocmask);
2112EXPORT_SYMBOL(block_all_signals);
2113EXPORT_SYMBOL(unblock_all_signals);
2114
2115
2116/*
2117 * System call entry points.
2118 */
2119
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002120SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121{
2122 struct restart_block *restart = &current_thread_info()->restart_block;
2123 return restart->fn(restart);
2124}
2125
/*
 * restart_block handler for syscalls that must not be restarted:
 * simply fail with -EINTR.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2130
2131/*
2132 * We don't need to get the kernel lock - this is all local to this
2133 * particular thread.. (and that's good, because this is _heavily_
2134 * used by various programs)
2135 */
2136
2137/*
2138 * This is also useful for kernel threads that want to temporarily
2139 * (or permanently) block certain signals.
2140 *
2141 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2142 * interface happily blocks "unblockable" signals like SIGKILL
2143 * and friends.
2144 */
2145int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2146{
2147 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
2149 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterova26fd332006-03-23 03:00:49 -08002150 if (oldset)
2151 *oldset = current->blocked;
2152
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 error = 0;
2154 switch (how) {
2155 case SIG_BLOCK:
2156 sigorsets(&current->blocked, &current->blocked, set);
2157 break;
2158 case SIG_UNBLOCK:
2159 signandsets(&current->blocked, &current->blocked, set);
2160 break;
2161 case SIG_SETMASK:
2162 current->blocked = *set;
2163 break;
2164 default:
2165 error = -EINVAL;
2166 }
2167 recalc_sigpending();
2168 spin_unlock_irq(&current->sighand->siglock);
Oleg Nesterova26fd332006-03-23 03:00:49 -08002169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 return error;
2171}
2172
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002173SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2174 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175{
2176 int error = -EINVAL;
2177 sigset_t old_set, new_set;
2178
2179 /* XXX: Don't preclude handling different sized sigset_t's. */
2180 if (sigsetsize != sizeof(sigset_t))
2181 goto out;
2182
2183 if (set) {
2184 error = -EFAULT;
2185 if (copy_from_user(&new_set, set, sizeof(*set)))
2186 goto out;
2187 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2188
2189 error = sigprocmask(how, &new_set, &old_set);
2190 if (error)
2191 goto out;
2192 if (oset)
2193 goto set_old;
2194 } else if (oset) {
2195 spin_lock_irq(&current->sighand->siglock);
2196 old_set = current->blocked;
2197 spin_unlock_irq(&current->sighand->siglock);
2198
2199 set_old:
2200 error = -EFAULT;
2201 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2202 goto out;
2203 }
2204 error = 0;
2205out:
2206 return error;
2207}
2208
2209long do_sigpending(void __user *set, unsigned long sigsetsize)
2210{
2211 long error = -EINVAL;
2212 sigset_t pending;
2213
2214 if (sigsetsize > sizeof(sigset_t))
2215 goto out;
2216
2217 spin_lock_irq(&current->sighand->siglock);
2218 sigorsets(&pending, &current->pending.signal,
2219 &current->signal->shared_pending.signal);
2220 spin_unlock_irq(&current->sighand->siglock);
2221
2222 /* Outside the lock because only this thread touches it. */
2223 sigandsets(&pending, &current->blocked, &pending);
2224
2225 error = -EFAULT;
2226 if (!copy_to_user(set, &pending, sigsetsize))
2227 error = 0;
2228
2229out:
2230 return error;
2231}
2232
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002233SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234{
2235 return do_sigpending(set, sigsetsize);
2236}
2237
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

/*
 * Copy a kernel siginfo_t to user space, writing only the three generic
 * ints (si_signo, si_errno, si_code) plus the union members that are
 * meaningful for this si_code class, so that uninitialized padding is
 * never leaked to user space.
 *
 * Returns 0 on success or -EFAULT if @to is not writable.
 */
int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	/* Negative si_code: user-originated info; copy it verbatim. */
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	/* Dispatch on the si_code class to pick the live union members. */
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
2311
/*
 * sys_rt_sigtimedwait - synchronously wait for a signal in @uthese,
 * optionally bounded by the timespec at @uts.
 *
 * Returns the dequeued signal number (copying its siginfo to @uinfo if
 * supplied), -EAGAIN if a zero timeout expired with nothing queued,
 * -EINTR if the sleep ended without one of the awaited signals, or
 * -EINVAL/-EFAULT for bad arguments.
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.  SIGKILL and SIGSTOP can never be waited for.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			/* The extra term rounds any nonzero timeout up
			 * by one jiffy, so we never sleep too short. */
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			/* Restore the original mask before returning. */
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
2386
/*
 * sys_kill - send @sig to the target(s) selected by @pid.
 * Builds an SI_USER siginfo stamped with the caller's tgid and uid and
 * hands it to kill_something_info(), which interprets @pid.
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}
2399
/*
 * do_send_specific - send a signal to exactly one task.
 * @tgid: thread group the target must belong to, or <= 0 to skip the check
 * @pid:  (namespace-local) PID of the target task
 * @sig:  signal number; 0 only probes existence and permission
 * @info: siginfo to deliver
 *
 * Returns 0 on success, -ESRCH when no matching task exists, or the
 * error from check_kill_permission()/do_send_sig_info().
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	/* RCU keeps the looked-up task valid without taking tasklist_lock. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
2429
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00002430static int do_tkill(pid_t tgid, pid_t pid, int sig)
2431{
2432 struct siginfo info;
2433
2434 info.si_signo = sig;
2435 info.si_errno = 0;
2436 info.si_code = SI_TKILL;
2437 info.si_pid = task_tgid_vnr(current);
2438 info.si_uid = current_uid();
2439
2440 return do_send_specific(tgid, pid, sig, &info);
2441}
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443/**
2444 * sys_tgkill - send signal to one specific thread
2445 * @tgid: the thread group ID of the thread
2446 * @pid: the PID of the thread
2447 * @sig: signal to be sent
2448 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08002449 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 * exists but it's not belonging to the target process anymore. This
2451 * method solves the problem of threads exiting and PIDs getting reused.
2452 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002453SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 /* This is only valid for single tasks */
2456 if (pid <= 0 || tgid <= 0)
2457 return -EINVAL;
2458
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08002459 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460}
2461
2462/*
2463 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2464 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002465SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 /* This is only valid for single tasks */
2468 if (pid <= 0)
2469 return -EINVAL;
2470
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08002471 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472}
2473
/*
 * sys_rt_sigqueueinfo - queue a signal with caller-supplied siginfo to
 * a process (backend of POSIX.1b sigqueue()).  Only SI_QUEUE is
 * accepted as si_code so user space cannot forge kernel-generated or
 * kill()/tgkill()-style siginfo.
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code != SI_QUEUE) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info.si_code < 0);
		return -EPERM;
	}
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
2495
/*
 * do_rt_tgsigqueueinfo - queue a signal with caller-supplied siginfo to
 * one specific thread (@tgid/@pid), mirroring sys_rt_sigqueueinfo's
 * SI_QUEUE-only policy.
 *
 * Returns 0 on success, -EINVAL for bad pids, -EPERM for a forged
 * si_code, or the error from do_send_specific().
 */
long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code != SI_QUEUE) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
2514
2515SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2516 siginfo_t __user *, uinfo)
2517{
2518 siginfo_t info;
2519
2520 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2521 return -EFAULT;
2522
2523 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2524}
2525
/*
 * do_sigaction - examine and/or change the action for signal @sig.
 * @sig:  signal number; must be valid, and catchable if @act is given
 * @act:  new action to install, or NULL to only query
 * @oact: if non-NULL, receives the previous action
 *
 * Returns 0 on success or -EINVAL for a bad signal number or an
 * attempt to change a kernel-only signal (SIGKILL/SIGSTOP).
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		/* SIGKILL/SIGSTOP can never be masked by a handler. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Flush @sig from the shared queue and from every
			 * thread's private queue (walk under siglock). */
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
2570
/*
 * do_sigaltstack - examine and/or change the alternate signal stack.
 * @uss:  new stack settings, or NULL to only query
 * @uoss: if non-NULL, receives the previous settings
 * @sp:   caller's current stack pointer, used to tell whether we are
 *        executing on the alternate stack right now
 *
 * Returns 0 on success, -EPERM if currently running on the alternate
 * stack, -EINVAL for bad flags, -ENOMEM for an undersized stack, or
 * -EFAULT for inaccessible user pointers.
 */
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	/* Snapshot the old settings before possibly changing them. */
	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		/* Cannot change the stack we are currently running on. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 *
		 * Note - this code used to test ss_flags incorrectly
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		/* Report the settings as they were on entry. */
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
2637
#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Old-style sigpending: reports pending signals in a single-word
 * old_sigset_t.  All the work is done in do_sigpending().
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
2646
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments others
   support only sys_rt_sigprocmask. */

/*
 * Old-style sigprocmask operating on the single-word old_sigset_t
 * (i.e. only the low word of the blocked mask).
 *
 * Returns 0 on success, -EINVAL for a bad @how, or -EFAULT on a bad
 * user pointer.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		/* SIGKILL and SIGSTOP may never be blocked. */
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
		/* Jumped to from the set-branch above with old_set already
		 * holding the pre-update mask. */
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2700
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/*
 * sys_rt_sigaction - examine and/or change a signal action.
 * Copies the user-space struct sigaction in/out and delegates the real
 * work to do_sigaction().
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	/* Only report the old action if do_sigaction() succeeded. */
	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2729
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 * Returns the low word of the caller's blocked-signal mask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}
2740
/*
 * Legacy counterpart of sgetmask: replaces the low word of the blocked
 * mask with @newmask and returns the previous value.
 */
SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	/* SIGKILL and SIGSTOP may never be blocked. */
	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
2756
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 * Installs @handler with the classic one-shot, non-masking semantics
 * and returns the previous handler (or a negative errno).
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction act, oact;
	int err;

	sigemptyset(&act.sa.sa_mask);
	act.sa.sa_handler = handler;
	act.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	err = do_sigaction(sig, &act, &oact);
	if (err)
		return err;

	return (unsigned long)oact.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
2775
#ifdef __ARCH_WANT_SYS_PAUSE

/*
 * Sleep until a signal arrives.  Always returns -ERESTARTNOHAND so the
 * syscall is not restarted after the signal handler runs.
 */
SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
2786
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/*
 * sys_rt_sigsuspend - atomically replace the blocked mask with @unewset
 * and sleep until a signal is delivered.  The original mask is stashed
 * in saved_sigmask and restored on signal-return via
 * set_restore_sigmask().
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	/* SIGKILL and SIGSTOP may never be blocked. */
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2812
/*
 * Weak default for architectures that do not name special VMAs;
 * presumably callers treat a NULL return as "no special name".
 */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
2817
/* Boot-time setup: create the slab cache used for sigqueue entries. */
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05002822
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	/* Only probing lock availability; release it again immediately. */
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		/* First attempt on a non-running task only warns; the
		 * operator must repeat the kill to accept the risk. */
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */