blob: 4f7312b49b2d3815e00a2c3251995ed89ca3e8a8 [file] [log] [blame]
/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *            Changes to use preallocated sigqueue structures
 *            to allow signals to be sent reliably.
 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
14#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070023#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070024#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090025#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070026#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080027#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080028#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080029#include <linux/pid_namespace.h>
30#include <linux/nsproxy.h>
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050031#define CREATE_TRACE_POINTS
32#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080033
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <asm/param.h>
35#include <asm/uaccess.h>
36#include <asm/unistd.h>
37#include <asm/siginfo.h>
Al Viroe1396062006-05-25 10:19:47 -040038#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/*
41 * SLAB caches for signal bits.
42 */
43
Christoph Lametere18b8902006-12-06 20:33:20 -080044static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090046int print_fatal_signals __read_mostly;
47
Roland McGrath35de2542008-07-25 19:45:51 -070048static void __user *sig_handler(struct task_struct *t, int sig)
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070049{
Roland McGrath35de2542008-07-25 19:45:51 -070050 return t->sighand->action[sig - 1].sa.sa_handler;
51}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070052
Roland McGrath35de2542008-07-25 19:45:51 -070053static int sig_handler_ignored(void __user *handler, int sig)
54{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070055 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070056 return handler == SIG_IGN ||
57 (handler == SIG_DFL && sig_kernel_ignore(sig));
58}
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -070060static int sig_task_ignored(struct task_struct *t, int sig,
61 int from_ancestor_ns)
Linus Torvalds1da177e2005-04-16 15:20:36 -070062{
Roland McGrath35de2542008-07-25 19:45:51 -070063 void __user *handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
Oleg Nesterovf008faf2009-04-02 16:58:02 -070065 handler = sig_handler(t, sig);
66
67 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -070068 handler == SIG_DFL && !from_ancestor_ns)
Oleg Nesterovf008faf2009-04-02 16:58:02 -070069 return 1;
70
71 return sig_handler_ignored(handler, sig);
72}
73
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -070074static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
Oleg Nesterovf008faf2009-04-02 16:58:02 -070075{
Linus Torvalds1da177e2005-04-16 15:20:36 -070076 /*
77 * Blocked signals are never ignored, since the
78 * signal handler may change by the time it is
79 * unblocked.
80 */
Roland McGrath325d22d2007-11-12 15:41:55 -080081 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 return 0;
83
Sukadev Bhattiprolu921cf9f2009-04-02 16:58:05 -070084 if (!sig_task_ignored(t, sig, from_ancestor_ns))
Roland McGrath35de2542008-07-25 19:45:51 -070085 return 0;
86
87 /*
88 * Tracers may want to know about even ignored signals.
89 */
Oleg Nesterov43918f22009-04-02 16:58:00 -070090 return !tracehook_consider_ignored_signal(t, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091}
92
93/*
94 * Re-calculate pending state from the set of locally pending
95 * signals, globally pending signals, and blocked signals.
96 */
97static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98{
99 unsigned long ready;
100 long i;
101
102 switch (_NSIG_WORDS) {
103 default:
104 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 ready |= signal->sig[i] &~ blocked->sig[i];
106 break;
107
108 case 4: ready = signal->sig[3] &~ blocked->sig[3];
109 ready |= signal->sig[2] &~ blocked->sig[2];
110 ready |= signal->sig[1] &~ blocked->sig[1];
111 ready |= signal->sig[0] &~ blocked->sig[0];
112 break;
113
114 case 2: ready = signal->sig[1] &~ blocked->sig[1];
115 ready |= signal->sig[0] &~ blocked->sig[0];
116 break;
117
118 case 1: ready = signal->sig[0] &~ blocked->sig[0];
119 }
120 return ready != 0;
121}
122
123#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700125static int recalc_sigpending_tsk(struct task_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126{
Tejun Heo39efa3e2011-03-23 10:37:00 +0100127 if ((t->group_stop & GROUP_STOP_PENDING) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128 PENDING(&t->pending, &t->blocked) ||
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700129 PENDING(&t->signal->shared_pending, &t->blocked)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130 set_tsk_thread_flag(t, TIF_SIGPENDING);
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700131 return 1;
132 }
Roland McGrathb74d0de2007-06-06 03:59:00 -0700133 /*
134 * We must never clear the flag in another thread, or in current
135 * when it's possible the current syscall is returning -ERESTART*.
136 * So we don't clear it here, and only callers who know they should do.
137 */
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700138 return 0;
139}
140
141/*
142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143 * This is superfluous when called on current, the wakeup is a harmless no-op.
144 */
/*
 * After recalculating TIF_SIGPENDING, make sure the task wakes up.
 * Calling this on current is fine: the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (!recalc_sigpending_tsk(t))
		return;
	signal_wake_up(t, 0);
}
150
151void recalc_sigpending(void)
152{
Roland McGrathb787f7b2008-07-25 19:45:55 -0700153 if (unlikely(tracehook_force_sigpending()))
154 set_thread_flag(TIF_SIGPENDING);
155 else if (!recalc_sigpending_tsk(current) && !freezing(current))
Roland McGrathb74d0de2007-06-06 03:59:00 -0700156 clear_thread_flag(TIF_SIGPENDING);
157
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158}
159
160/* Given the mask, find the first available signal that should be serviced. */
161
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800162#define SYNCHRONOUS_MASK \
163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164 sigmask(SIGTRAP) | sigmask(SIGFPE))
165
Davide Libenzifba2afa2007-05-10 22:23:13 -0700166int next_signal(struct sigpending *pending, sigset_t *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 unsigned long i, *s, *m, x;
169 int sig = 0;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900170
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 s = pending->signal.sig;
172 m = mask->sig;
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800173
174 /*
175 * Handle the first word specially: it contains the
176 * synchronous signals that need to be dequeued first.
177 */
178 x = *s &~ *m;
179 if (x) {
180 if (x & SYNCHRONOUS_MASK)
181 x &= SYNCHRONOUS_MASK;
182 sig = ffz(~x) + 1;
183 return sig;
184 }
185
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 switch (_NSIG_WORDS) {
187 default:
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800188 for (i = 1; i < _NSIG_WORDS; ++i) {
189 x = *++s &~ *++m;
190 if (!x)
191 continue;
192 sig = ffz(~x) + i*_NSIG_BPW + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193 break;
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 break;
196
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800197 case 2:
198 x = s[1] &~ m[1];
199 if (!x)
200 break;
201 sig = ffz(~x) + _NSIG_BPW + 1;
202 break;
203
204 case 1:
205 /* Nothing to do */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 break;
207 }
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900208
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 return sig;
210}
211
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900212static inline void print_dropped_signal(int sig)
213{
214 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216 if (!print_fatal_signals)
217 return;
218
219 if (!__ratelimit(&ratelimit_state))
220 return;
221
222 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223 current->comm, current->pid, sig);
224}
225
Tejun Heoe5c19022011-03-23 10:37:00 +0100226/**
Tejun Heod79fdd62011-03-23 10:37:00 +0100227 * task_clear_group_stop_trapping - clear group stop trapping bit
228 * @task: target task
229 *
230 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
231 * and wake up the ptracer. Note that we don't need any further locking.
232 * @task->siglock guarantees that @task->parent points to the ptracer.
233 *
234 * CONTEXT:
235 * Must be called with @task->sighand->siglock held.
236 */
237static void task_clear_group_stop_trapping(struct task_struct *task)
238{
239 if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
240 task->group_stop &= ~GROUP_STOP_TRAPPING;
241 __wake_up_sync(&task->parent->signal->wait_chldexit,
242 TASK_UNINTERRUPTIBLE, 1);
243 }
244}
245
246/**
Tejun Heoe5c19022011-03-23 10:37:00 +0100247 * task_clear_group_stop_pending - clear pending group stop
248 * @task: target task
249 *
250 * Clear group stop states for @task.
251 *
252 * CONTEXT:
253 * Must be called with @task->sighand->siglock held.
254 */
Tejun Heo39efa3e2011-03-23 10:37:00 +0100255void task_clear_group_stop_pending(struct task_struct *task)
Tejun Heoe5c19022011-03-23 10:37:00 +0100256{
Oleg Nesterovee77f072011-04-01 20:12:38 +0200257 task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
258 GROUP_STOP_DEQUEUED);
Tejun Heoe5c19022011-03-23 10:37:00 +0100259}
260
261/**
262 * task_participate_group_stop - participate in a group stop
263 * @task: task participating in a group stop
264 *
Tejun Heo39efa3e2011-03-23 10:37:00 +0100265 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
266 * Group stop states are cleared and the group stop count is consumed if
267 * %GROUP_STOP_CONSUME was set. If the consumption completes the group
268 * stop, the appropriate %SIGNAL_* flags are set.
Tejun Heoe5c19022011-03-23 10:37:00 +0100269 *
270 * CONTEXT:
271 * Must be called with @task->sighand->siglock held.
Tejun Heo244056f2011-03-23 10:37:01 +0100272 *
273 * RETURNS:
274 * %true if group stop completion should be notified to the parent, %false
275 * otherwise.
Tejun Heoe5c19022011-03-23 10:37:00 +0100276 */
277static bool task_participate_group_stop(struct task_struct *task)
278{
279 struct signal_struct *sig = task->signal;
280 bool consume = task->group_stop & GROUP_STOP_CONSUME;
281
Tejun Heo39efa3e2011-03-23 10:37:00 +0100282 WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
283
Tejun Heoe5c19022011-03-23 10:37:00 +0100284 task_clear_group_stop_pending(task);
285
286 if (!consume)
287 return false;
288
289 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
290 sig->group_stop_count--;
291
Tejun Heo244056f2011-03-23 10:37:01 +0100292 /*
293 * Tell the caller to notify completion iff we are entering into a
294 * fresh group stop. Read comment in do_signal_stop() for details.
295 */
296 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
Tejun Heoe5c19022011-03-23 10:37:00 +0100297 sig->flags = SIGNAL_STOP_STOPPED;
298 return true;
299 }
300 return false;
301}
302
David Howellsc69e8d92008-11-14 10:39:19 +1100303/*
304 * allocate a new signal queue record
305 * - this may be called without locks if and only if t == current, otherwise an
David Howellsd84f4f92008-11-14 10:39:23 +1100306 * appopriate lock must be held to stop the target task from exiting
David Howellsc69e8d92008-11-14 10:39:19 +1100307 */
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900308static struct sigqueue *
309__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310{
311 struct sigqueue *q = NULL;
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800312 struct user_struct *user;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800314 /*
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000315 * Protect access to @t credentials. This can go away when all
316 * callers hold rcu read lock.
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800317 */
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000318 rcu_read_lock();
David Howellsd84f4f92008-11-14 10:39:23 +1100319 user = get_uid(__task_cred(t)->user);
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800320 atomic_inc(&user->sigpending);
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000321 rcu_read_unlock();
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900322
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 if (override_rlimit ||
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800324 atomic_read(&user->sigpending) <=
Jiri Slaby78d7d402010-03-05 13:42:54 -0800325 task_rlimit(t, RLIMIT_SIGPENDING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326 q = kmem_cache_alloc(sigqueue_cachep, flags);
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900327 } else {
328 print_dropped_signal(sig);
329 }
330
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 if (unlikely(q == NULL)) {
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800332 atomic_dec(&user->sigpending);
David Howellsd84f4f92008-11-14 10:39:23 +1100333 free_uid(user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 } else {
335 INIT_LIST_HEAD(&q->list);
336 q->flags = 0;
David Howellsd84f4f92008-11-14 10:39:23 +1100337 q->user = user;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 }
David Howellsd84f4f92008-11-14 10:39:23 +1100339
340 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341}
342
Andrew Morton514a01b2006-02-03 03:04:41 -0800343static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344{
345 if (q->flags & SIGQUEUE_PREALLOC)
346 return;
347 atomic_dec(&q->user->sigpending);
348 free_uid(q->user);
349 kmem_cache_free(sigqueue_cachep, q);
350}
351
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800352void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353{
354 struct sigqueue *q;
355
356 sigemptyset(&queue->signal);
357 while (!list_empty(&queue->list)) {
358 q = list_entry(queue->list.next, struct sigqueue , list);
359 list_del_init(&q->list);
360 __sigqueue_free(q);
361 }
362}
363
364/*
365 * Flush all pending signals for a task.
366 */
David Howells3bcac022009-04-29 13:45:05 +0100367void __flush_signals(struct task_struct *t)
368{
369 clear_tsk_thread_flag(t, TIF_SIGPENDING);
370 flush_sigqueue(&t->pending);
371 flush_sigqueue(&t->signal->shared_pending);
372}
373
Oleg Nesterovc81addc2006-03-28 16:11:17 -0800374void flush_signals(struct task_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375{
376 unsigned long flags;
377
378 spin_lock_irqsave(&t->sighand->siglock, flags);
David Howells3bcac022009-04-29 13:45:05 +0100379 __flush_signals(t);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 spin_unlock_irqrestore(&t->sighand->siglock, flags);
381}
382
Oleg Nesterovcbaffba2008-05-26 20:55:42 +0400383static void __flush_itimer_signals(struct sigpending *pending)
384{
385 sigset_t signal, retain;
386 struct sigqueue *q, *n;
387
388 signal = pending->signal;
389 sigemptyset(&retain);
390
391 list_for_each_entry_safe(q, n, &pending->list, list) {
392 int sig = q->info.si_signo;
393
394 if (likely(q->info.si_code != SI_TIMER)) {
395 sigaddset(&retain, sig);
396 } else {
397 sigdelset(&signal, sig);
398 list_del_init(&q->list);
399 __sigqueue_free(q);
400 }
401 }
402
403 sigorsets(&pending->signal, &signal, &retain);
404}
405
406void flush_itimer_signals(void)
407{
408 struct task_struct *tsk = current;
409 unsigned long flags;
410
411 spin_lock_irqsave(&tsk->sighand->siglock, flags);
412 __flush_itimer_signals(&tsk->pending);
413 __flush_itimer_signals(&tsk->signal->shared_pending);
414 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
415}
416
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700417void ignore_signals(struct task_struct *t)
418{
419 int i;
420
421 for (i = 0; i < _NSIG; ++i)
422 t->sighand->action[i].sa.sa_handler = SIG_IGN;
423
424 flush_signals(t);
425}
426
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 * Flush all handlers for a task.
429 */
430
431void
432flush_signal_handlers(struct task_struct *t, int force_default)
433{
434 int i;
435 struct k_sigaction *ka = &t->sighand->action[0];
436 for (i = _NSIG ; i != 0 ; i--) {
437 if (force_default || ka->sa.sa_handler != SIG_IGN)
438 ka->sa.sa_handler = SIG_DFL;
439 ka->sa.sa_flags = 0;
440 sigemptyset(&ka->sa.sa_mask);
441 ka++;
442 }
443}
444
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200445int unhandled_signal(struct task_struct *tsk, int sig)
446{
Roland McGrath445a91d2008-07-25 19:45:52 -0700447 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
Serge E. Hallynb460cbc2007-10-18 23:39:52 -0700448 if (is_global_init(tsk))
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200449 return 1;
Roland McGrath445a91d2008-07-25 19:45:52 -0700450 if (handler != SIG_IGN && handler != SIG_DFL)
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200451 return 0;
Oleg Nesterov43918f22009-04-02 16:58:00 -0700452 return !tracehook_consider_fatal_signal(tsk, sig);
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200453}
454
Linus Torvalds1da177e2005-04-16 15:20:36 -0700455
456/* Notify the system that a driver wants to block all signals for this
457 * process, and wants to be notified if any signals at all were to be
458 * sent/acted upon. If the notifier routine returns non-zero, then the
459 * signal will be acted upon after all. If the notifier routine returns 0,
460 * then then signal will be blocked. Only one block per process is
461 * allowed. priv is a pointer to private data that the notifier routine
462 * can use to determine if the signal should be blocked or not. */
463
464void
465block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
466{
467 unsigned long flags;
468
469 spin_lock_irqsave(&current->sighand->siglock, flags);
470 current->notifier_mask = mask;
471 current->notifier_data = priv;
472 current->notifier = notifier;
473 spin_unlock_irqrestore(&current->sighand->siglock, flags);
474}
475
476/* Notify the system that blocking has ended. */
477
478void
479unblock_all_signals(void)
480{
481 unsigned long flags;
482
483 spin_lock_irqsave(&current->sighand->siglock, flags);
484 current->notifier = NULL;
485 current->notifier_data = NULL;
486 recalc_sigpending();
487 spin_unlock_irqrestore(&current->sighand->siglock, flags);
488}
489
Oleg Nesterov100360f2008-07-25 01:47:29 -0700490static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491{
492 struct sigqueue *q, *first = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 /*
495 * Collect the siginfo appropriate to this signal. Check if
496 * there is another siginfo for the same signal.
497 */
498 list_for_each_entry(q, &list->list, list) {
499 if (q->info.si_signo == sig) {
Oleg Nesterovd4434202008-07-25 01:47:28 -0700500 if (first)
501 goto still_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502 first = q;
503 }
504 }
Oleg Nesterovd4434202008-07-25 01:47:28 -0700505
506 sigdelset(&list->signal, sig);
507
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 if (first) {
Oleg Nesterovd4434202008-07-25 01:47:28 -0700509still_pending:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 list_del_init(&first->list);
511 copy_siginfo(info, &first->info);
512 __sigqueue_free(first);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514 /* Ok, it wasn't in the queue. This must be
515 a fast-pathed signal or we must have been
516 out of queue space. So zero out the info.
517 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 info->si_signo = sig;
519 info->si_errno = 0;
Oleg Nesterov7486e5d2009-12-15 16:47:24 -0800520 info->si_code = SI_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521 info->si_pid = 0;
522 info->si_uid = 0;
523 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524}
525
526static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
527 siginfo_t *info)
528{
Roland McGrath27d91e02006-09-29 02:00:31 -0700529 int sig = next_signal(pending, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 if (sig) {
532 if (current->notifier) {
533 if (sigismember(current->notifier_mask, sig)) {
534 if (!(current->notifier)(current->notifier_data)) {
535 clear_thread_flag(TIF_SIGPENDING);
536 return 0;
537 }
538 }
539 }
540
Oleg Nesterov100360f2008-07-25 01:47:29 -0700541 collect_signal(sig, pending, info);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543
544 return sig;
545}
546
547/*
548 * Dequeue a signal and return the element to the caller, which is
549 * expected to free it.
550 *
551 * All callers have to hold the siglock.
552 */
553int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
554{
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700555 int signr;
Benjamin Herrenschmidtcaec4e82007-06-12 08:16:18 +1000556
557 /* We only dequeue private signals from ourselves, we don't let
558 * signalfd steal them
559 */
Davide Libenzib8fceee2007-09-20 12:40:16 -0700560 signr = __dequeue_signal(&tsk->pending, mask, info);
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800561 if (!signr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 signr = __dequeue_signal(&tsk->signal->shared_pending,
563 mask, info);
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800564 /*
565 * itimer signal ?
566 *
567 * itimers are process shared and we restart periodic
568 * itimers in the signal delivery path to prevent DoS
569 * attacks in the high resolution timer case. This is
570 * compliant with the old way of self restarting
571 * itimers, as the SIGALRM is a legacy signal and only
572 * queued once. Changing the restart behaviour to
573 * restart the timer in the signal dequeue path is
574 * reducing the timer noise on heavy loaded !highres
575 * systems too.
576 */
577 if (unlikely(signr == SIGALRM)) {
578 struct hrtimer *tmr = &tsk->signal->real_timer;
579
580 if (!hrtimer_is_queued(tmr) &&
581 tsk->signal->it_real_incr.tv64 != 0) {
582 hrtimer_forward(tmr, tmr->base->get_time(),
583 tsk->signal->it_real_incr);
584 hrtimer_restart(tmr);
585 }
586 }
587 }
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700588
Davide Libenzib8fceee2007-09-20 12:40:16 -0700589 recalc_sigpending();
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700590 if (!signr)
591 return 0;
592
593 if (unlikely(sig_kernel_stop(signr))) {
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800594 /*
595 * Set a marker that we have dequeued a stop signal. Our
596 * caller might release the siglock and then the pending
597 * stop signal it is about to process is no longer in the
598 * pending bitmasks, but must still be cleared by a SIGCONT
599 * (and overruled by a SIGKILL). So those cases clear this
600 * shared flag after we've set it. Note that this flag may
601 * remain set after the signal we return is ignored or
602 * handled. That doesn't matter because its only purpose
603 * is to alert stop-signal processing code when another
604 * processor has come along and cleared the flag.
605 */
Oleg Nesterovee77f072011-04-01 20:12:38 +0200606 current->group_stop |= GROUP_STOP_DEQUEUED;
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800607 }
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700608 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 /*
610 * Release the siglock to ensure proper locking order
611 * of timer locks outside of siglocks. Note, we leave
612 * irqs disabled here, since the posix-timers code is
613 * about to disable them again anyway.
614 */
615 spin_unlock(&tsk->sighand->siglock);
616 do_schedule_next_timer(info);
617 spin_lock(&tsk->sighand->siglock);
618 }
619 return signr;
620}
621
622/*
623 * Tell a process that it has a new active signal..
624 *
625 * NOTE! we rely on the previous spin_lock to
626 * lock interrupts for us! We can only be called with
627 * "siglock" held, and the local interrupt must
628 * have been disabled when that got acquired!
629 *
630 * No need to set need_resched since signal event passing
631 * goes through ->blocked
632 */
633void signal_wake_up(struct task_struct *t, int resume)
634{
635 unsigned int mask;
636
637 set_tsk_thread_flag(t, TIF_SIGPENDING);
638
639 /*
Matthew Wilcoxf021a3c2007-12-06 11:13:16 -0500640 * For SIGKILL, we want to wake it up in the stopped/traced/killable
641 * case. We don't check t->state here because there is a race with it
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642 * executing another processor and just now entering stopped state.
643 * By using wake_up_state, we ensure the process will wake up and
644 * handle its death signal.
645 */
646 mask = TASK_INTERRUPTIBLE;
647 if (resume)
Matthew Wilcoxf021a3c2007-12-06 11:13:16 -0500648 mask |= TASK_WAKEKILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 if (!wake_up_state(t, mask))
650 kick_process(t);
651}
652
653/*
654 * Remove signals in mask from the pending set and queue.
655 * Returns 1 if any signals were found.
656 *
657 * All callers must be holding the siglock.
George Anzinger71fabd52006-01-08 01:02:48 -0800658 *
659 * This version takes a sigset mask and looks at all signals,
660 * not just those in the first mask word.
661 */
662static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
663{
664 struct sigqueue *q, *n;
665 sigset_t m;
666
667 sigandsets(&m, mask, &s->signal);
668 if (sigisemptyset(&m))
669 return 0;
670
671 signandsets(&s->signal, &s->signal, mask);
672 list_for_each_entry_safe(q, n, &s->list, list) {
673 if (sigismember(mask, q->info.si_signo)) {
674 list_del_init(&q->list);
675 __sigqueue_free(q);
676 }
677 }
678 return 1;
679}
680/*
681 * Remove signals in mask from the pending set and queue.
682 * Returns 1 if any signals were found.
683 *
684 * All callers must be holding the siglock.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 */
686static int rm_from_queue(unsigned long mask, struct sigpending *s)
687{
688 struct sigqueue *q, *n;
689
690 if (!sigtestsetmask(&s->signal, mask))
691 return 0;
692
693 sigdelsetmask(&s->signal, mask);
694 list_for_each_entry_safe(q, n, &s->list, list) {
695 if (q->info.si_signo < SIGRTMIN &&
696 (mask & sigmask(q->info.si_signo))) {
697 list_del_init(&q->list);
698 __sigqueue_free(q);
699 }
700 }
701 return 1;
702}
703
Oleg Nesterov614c5172009-12-15 16:47:22 -0800704static inline int is_si_special(const struct siginfo *info)
705{
706 return info <= SEND_SIG_FORCED;
707}
708
709static inline bool si_fromuser(const struct siginfo *info)
710{
711 return info == SEND_SIG_NOINFO ||
712 (!is_si_special(info) && SI_FROMUSER(info));
713}
714
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715/*
716 * Bad permissions for sending the signal
David Howells694f6902010-08-04 16:59:14 +0100717 * - the caller must hold the RCU read lock
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 */
719static int check_kill_permission(int sig, struct siginfo *info,
720 struct task_struct *t)
721{
Oleg Nesterov065add32010-05-26 14:42:54 -0700722 const struct cred *cred, *tcred;
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700723 struct pid *sid;
Oleg Nesterov3b5e9e52008-04-30 00:52:42 -0700724 int error;
725
Jesper Juhl7ed20e12005-05-01 08:59:14 -0700726 if (!valid_signal(sig))
Oleg Nesterov3b5e9e52008-04-30 00:52:42 -0700727 return -EINVAL;
728
Oleg Nesterov614c5172009-12-15 16:47:22 -0800729 if (!si_fromuser(info))
Oleg Nesterov3b5e9e52008-04-30 00:52:42 -0700730 return 0;
731
732 error = audit_signal_info(sig, t); /* Let audit system see the signal */
733 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734 return error;
Amy Griffise54dc242007-03-29 18:01:04 -0400735
Oleg Nesterov065add32010-05-26 14:42:54 -0700736 cred = current_cred();
David Howellsc69e8d92008-11-14 10:39:19 +1100737 tcred = __task_cred(t);
Oleg Nesterov065add32010-05-26 14:42:54 -0700738 if (!same_thread_group(current, t) &&
739 (cred->euid ^ tcred->suid) &&
David Howellsc69e8d92008-11-14 10:39:19 +1100740 (cred->euid ^ tcred->uid) &&
741 (cred->uid ^ tcred->suid) &&
742 (cred->uid ^ tcred->uid) &&
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700743 !capable(CAP_KILL)) {
744 switch (sig) {
745 case SIGCONT:
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700746 sid = task_session(t);
Oleg Nesterov2e2ba222008-04-30 00:53:01 -0700747 /*
748 * We don't return the error if sid == NULL. The
749 * task was unhashed, the caller must notice this.
750 */
751 if (!sid || sid == task_session(current))
752 break;
753 default:
754 return -EPERM;
755 }
756 }
Steve Grubbc2f0c7c2005-05-06 12:38:39 +0100757
Amy Griffise54dc242007-03-29 18:01:04 -0400758 return security_task_kill(t, info, sig, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759}
760
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 *
 * Called with p->sighand->siglock held (asserted by __send_signal(),
 * taken by send_sigqueue() via lock_task_sighand()).
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues:
		 * first the group-shared queue, then every thread's
		 * private queue.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_group_stop_pending(t);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			wake_up_state(t, __TASK_STOPPED);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	/* Deliver unless the signal would be ignored anyway. */
	return !sig_ignored(p, sig, from_ancestor_ns);
}
830
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700831/*
832 * Test if P wants to take SIG. After we've checked all threads with this,
833 * it's equivalent to finding no threads not blocking SIG. Any threads not
834 * blocking SIG were ruled out because they are not running and already
835 * have pending signals. Such threads will dequeue from the shared queue
836 * as soon as they're available, so putting the signal on the shared queue
837 * will be equivalent to sending it to one such thread.
838 */
839static inline int wants_signal(int sig, struct task_struct *p)
840{
841 if (sigismember(&p->blocked, sig))
842 return 0;
843 if (p->flags & PF_EXITING)
844 return 0;
845 if (sig == SIGKILL)
846 return 1;
847 if (task_is_stopped_or_traced(p))
848 return 0;
849 return task_curr(p) || !signal_pending(p);
850}
851
/*
 * A signal has just been queued (on the shared or private pending set);
 * pick a thread to wake so it gets dequeued, and short-circuit a whole
 * group kill when the signal is fatal.
 *
 * @group: nonzero if the signal was queued group-wide.
 * Runs under ->siglock (callers: __send_signal(), send_sigqueue()).
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * ->curr_target is a rotating cursor so successive signals
		 * are spread across willing threads.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 * (Coredumping signals are excluded below: the dump path
		 * must run first, so they take the normal wake-up route.)
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_group_stop_pending(t);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
928
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -0700929static inline int legacy_queue(struct sigpending *signals, int sig)
930{
931 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
932}
933
/*
 * Core signal-generation path: run the process-wide side effects
 * (prepare_signal), queue a sigqueue entry carrying the siginfo when
 * possible, and kick a suitable thread (complete_signal).
 *
 * @group:            queue on the shared_pending set instead of t->pending.
 * @from_ancestor_ns: sender lives in an ancestor pid namespace; si_pid is
 *                    meaningless there and gets zeroed.
 *
 * Returns 0 on success, -EAGAIN if a realtime signal could not be queued.
 * Caller must hold t->sighand->siglock (asserted below).
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL: skip the allocation and jump straight to marking
	 * the signal pending.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	/*
	 * Legacy signals sent by the kernel or by userspace kill() may
	 * exceed the pending-signal rlimit rather than be lost.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* info may be one of two magic cookie pointers, not real data. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			/* sender's pid is not visible in t's namespace */
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
1026
/*
 * Wrapper around __send_signal() that works out whether the sender sits
 * in an ancestor pid namespace of the target (only possible with
 * CONFIG_PID_NS), so the target's siginfo can hide a meaningless pid.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	/*
	 * A userspace-originated signal whose sender has no pid in the
	 * target's namespace must come from an ancestor namespace.
	 */
	if (si_fromuser(info) &&
	    !task_pid_nr_ns(current, task_active_pid_ns(t)))
		from_ancestor_ns = 1;
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1039
Ingo Molnar45807a12007-07-15 23:40:10 -07001040static void print_fatal_signal(struct pt_regs *regs, int signr)
1041{
1042 printk("%s/%d: potentially unexpected fatal signal %d.\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001043 current->comm, task_pid_nr(current), signr);
Ingo Molnar45807a12007-07-15 23:40:10 -07001044
Al Viroca5cd872007-10-29 04:31:16 +00001045#if defined(__i386__) && !defined(__arch_um__)
H. Peter Anvin65ea5b02008-01-30 13:30:56 +01001046 printk("code at %08lx: ", regs->ip);
Ingo Molnar45807a12007-07-15 23:40:10 -07001047 {
1048 int i;
1049 for (i = 0; i < 16; i++) {
1050 unsigned char insn;
1051
Andi Kleenb45c6e72010-01-08 14:42:52 -08001052 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1053 break;
Ingo Molnar45807a12007-07-15 23:40:10 -07001054 printk("%02x ", insn);
1055 }
1056 }
1057#endif
1058 printk("\n");
Ed Swierk3a9f84d2009-01-26 15:33:31 -08001059 preempt_disable();
Ingo Molnar45807a12007-07-15 23:40:10 -07001060 show_regs(regs);
Ed Swierk3a9f84d2009-01-26 15:33:31 -08001061 preempt_enable();
Ingo Molnar45807a12007-07-15 23:40:10 -07001062}
1063
1064static int __init setup_print_fatal_signals(char *str)
1065{
1066 get_option (&str, &print_fatal_signals);
1067
1068 return 1;
1069}
1070
1071__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072
/* Queue @sig group-wide on @p's shared pending set (no permission check). */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	const int group = 1;

	return send_signal(sig, info, p, group);
}
1078
/* Queue @sig on thread @t's private pending set only. */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	const int group = 0;

	return send_signal(sig, info, t, group);
}
1084
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001085int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1086 bool group)
1087{
1088 unsigned long flags;
1089 int ret = -ESRCH;
1090
1091 if (lock_task_sighand(p, &flags)) {
1092 ret = send_signal(sig, info, p, group);
1093 unlock_task_sighand(p, &flags);
1094 }
1095
1096 return ret;
1097}
1098
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* See comment above: blocked/ignored handlers revert to SIG_DFL. */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/* Default disposition => allow the signal to kill even "unkillable" tasks. */
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	/* Deliver to this specific thread, still under siglock. */
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1135
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136/*
1137 * Nuke all other threads in the group.
1138 */
Oleg Nesterov09faef12010-05-26 14:43:11 -07001139int zap_other_threads(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140{
Oleg Nesterov09faef12010-05-26 14:43:11 -07001141 struct task_struct *t = p;
1142 int count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 p->signal->group_stop_count = 0;
1145
Oleg Nesterov09faef12010-05-26 14:43:11 -07001146 while_each_thread(p, t) {
Tejun Heo39efa3e2011-03-23 10:37:00 +01001147 task_clear_group_stop_pending(t);
Oleg Nesterov09faef12010-05-26 14:43:11 -07001148 count++;
1149
1150 /* Don't bother with already dead threads */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 if (t->exit_state)
1152 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 sigaddset(&t->pending.signal, SIGKILL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 signal_wake_up(t, 1);
1155 }
Oleg Nesterov09faef12010-05-26 14:43:11 -07001156
1157 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158}
1159
/*
 * Lock tsk->sighand->siglock, guarding against the sighand being
 * detached concurrently.  Returns the locked sighand_struct, or NULL
 * if the task has no sighand left (it is exiting); *flags receives the
 * saved IRQ state for the matching unlock.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	/* RCU keeps the sighand memory valid while we probe it. */
	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		/*
		 * Re-check after locking: tsk->sighand may have been
		 * switched between the dereference and the lock; if so,
		 * drop the stale lock and retry.
		 */
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
1180
David Howellsc69e8d92008-11-14 10:39:19 +11001181/*
1182 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001183 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1185{
David Howells694f6902010-08-04 16:59:14 +01001186 int ret;
1187
1188 rcu_read_lock();
1189 ret = check_kill_permission(sig, info, p);
1190 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001192 if (!ret && sig)
1193 ret = do_send_sig_info(sig, info, p, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
1195 return ret;
1196}
1197
1198/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001199 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001201 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 */
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001203int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204{
1205 struct task_struct *p = NULL;
1206 int retval, success;
1207
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 success = 0;
1209 retval = -ESRCH;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001210 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 int err = group_send_sig_info(sig, info, p);
1212 success |= !err;
1213 retval = err;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001214 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 return success ? 0 : retval;
1216}
1217
/*
 * Send @sig to the thread group owning struct pid @pid.
 * Returns 0 on success, -ESRCH if no such task exists.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
1241
Matthew Wilcoxc3de4b32007-02-09 08:11:47 -07001242int
1243kill_proc_info(int sig, struct siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001244{
1245 int error;
1246 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001247 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001248 rcu_read_unlock();
1249 return error;
1250}
1251
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* Permission check against the caller-supplied ids, not current's. */
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid != pcred->suid && uid != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	/* LSM gets a veto, keyed by the supplied security id. */
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	/* sig == 0 means permission probe only; nothing is sent. */
	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	/* pid > 0: exactly one process. */
	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0: caller's process group; pid < -1: group -pid. */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		/* pid == -1: every process except pid 1 and our own group. */
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				/* Keep the last non-EPERM status as the result. */
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1335
1336/*
1337 * These are for backward compatibility with the rest of the kernel source.
1338 */
1339
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340int
1341send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1342{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 /*
1344 * Make sure legacy kernel users don't send in bad values
1345 * (normal paths check this in check_kill_permission).
1346 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001347 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 return -EINVAL;
1349
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001350 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351}
1352
/* Map the legacy "priv" flag to the matching magic siginfo cookie. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1355
/* Legacy entry point: @priv selects kernel-internal vs. no-info delivery. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *info = __si_special(priv);

	return send_sig_info(sig, info, p);
}
1361
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362void
1363force_sig(int sig, struct task_struct *p)
1364{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001365 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366}
1367
1368/*
1369 * When things go south during signal handling, we
1370 * will force a SIGSEGV. And if the signal that caused
1371 * the problem was already a SIGSEGV, we'll want to
1372 * make sure we don't even try to deliver the signal..
1373 */
1374int
1375force_sigsegv(int sig, struct task_struct *p)
1376{
1377 if (sig == SIGSEGV) {
1378 unsigned long flags;
1379 spin_lock_irqsave(&p->sighand->siglock, flags);
1380 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1381 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1382 }
1383 force_sig(SIGSEGV, p);
1384 return 0;
1385}
1386
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001387int kill_pgrp(struct pid *pid, int sig, int priv)
1388{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001389 int ret;
1390
1391 read_lock(&tasklist_lock);
1392 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1393 read_unlock(&tasklist_lock);
1394
1395 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001396}
1397EXPORT_SYMBOL(kill_pgrp);
1398
/* Send @sig to the process identified by struct pid @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	struct siginfo *info = __si_special(priv);

	return kill_pid_info(sig, info, pid);
}
EXPORT_SYMBOL(kill_pid);
1404
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405/*
1406 * These functions support sending signals using preallocated sigqueue
1407 * structures. This is needed "because realtime applications cannot
1408 * afford to lose notifications of asynchronous events, like timer
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001409 * expirations or I/O completions". In the case of Posix Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 * we allocate the sigqueue structure from the timer_create. If this
1411 * allocation fails we are able to report the failure to the application
1412 * with an EAGAIN error.
1413 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414struct sigqueue *sigqueue_alloc(void)
1415{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001416 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001418 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001420
1421 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422}
1423
/*
 * Release a preallocated sigqueue.  If it is currently queued on some
 * pending list we must not free it here; clearing SIGQUEUE_PREALLOC
 * makes the normal dequeue path free it instead.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1448
/*
 * Queue a preallocated sigqueue entry (posix-timer path) on @t.
 * @group selects the shared vs. per-thread pending set.
 *
 * Returns 0 on success, 1 if the signal is ignored, -1 if the target's
 * sighand is already gone.  If the entry is already queued (SI_TIMER),
 * only the overrun count is bumped.
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Fresh queueing: reset the overrun counter. */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1488
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 * Let a parent know about the death of a child.
1491 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001492 *
1493 * Returns -1 if our parent ignored us and so we've switched to
1494 * self-reaping, or else @sig.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 */
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001496int do_notify_parent(struct task_struct *tsk, int sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497{
1498 struct siginfo info;
1499 unsigned long flags;
1500 struct sighand_struct *psig;
Roland McGrath1b046242008-08-19 20:37:07 -07001501 int ret = sig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502
1503 BUG_ON(sig == -1);
1504
1505 /* do_notify_parent_cldstop should have been called instead. */
Matthew Wilcoxe1abb392007-12-06 11:07:35 -05001506 BUG_ON(task_is_stopped_or_traced(tsk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001508 BUG_ON(!task_ptrace(tsk) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1510
1511 info.si_signo = sig;
1512 info.si_errno = 0;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001513 /*
1514 * we are under tasklist_lock here so our parent is tied to
1515 * us and cannot exit and release its namespace.
1516 *
1517 * the only it can is to switch its nsproxy with sys_unshare,
1518 * bu uncharing pid namespaces is not allowed, so we'll always
1519 * see relevant namespace
1520 *
1521 * write_lock() currently calls preempt_disable() which is the
1522 * same as rcu_read_lock(), but according to Oleg, this is not
1523 * correct to rely on this
1524 */
1525 rcu_read_lock();
1526 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
David Howellsc69e8d92008-11-14 10:39:19 +11001527 info.si_uid = __task_cred(tsk)->uid;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001528 rcu_read_unlock();
1529
Peter Zijlstra32bd6712009-02-05 12:24:15 +01001530 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1531 tsk->signal->utime));
1532 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1533 tsk->signal->stime));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
1535 info.si_status = tsk->exit_code & 0x7f;
1536 if (tsk->exit_code & 0x80)
1537 info.si_code = CLD_DUMPED;
1538 else if (tsk->exit_code & 0x7f)
1539 info.si_code = CLD_KILLED;
1540 else {
1541 info.si_code = CLD_EXITED;
1542 info.si_status = tsk->exit_code >> 8;
1543 }
1544
1545 psig = tsk->parent->sighand;
1546 spin_lock_irqsave(&psig->siglock, flags);
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001547 if (!task_ptrace(tsk) && sig == SIGCHLD &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1549 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1550 /*
1551 * We are exiting and our parent doesn't care. POSIX.1
1552 * defines special semantics for setting SIGCHLD to SIG_IGN
1553 * or setting the SA_NOCLDWAIT flag: we should be reaped
1554 * automatically and not left for our parent's wait4 call.
1555 * Rather than having the parent do it as a magic kind of
1556 * signal handler, we just set this to tell do_exit that we
1557 * can be cleaned up without becoming a zombie. Note that
1558 * we still call __wake_up_parent in this case, because a
1559 * blocked sys_wait4 might now return -ECHILD.
1560 *
1561 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1562 * is implementation-defined: we do (if you don't want
1563 * it, just use SIG_IGN instead).
1564 */
Roland McGrath1b046242008-08-19 20:37:07 -07001565 ret = tsk->exit_signal = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001567 sig = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 }
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001569 if (valid_signal(sig) && sig > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 __group_send_sig_info(sig, &info, tsk->parent);
1571 __wake_up_parent(tsk, tsk->parent);
1572 spin_unlock_irqrestore(&psig->siglock, flags);
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001573
Roland McGrath1b046242008-08-19 20:37:07 -07001574 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
1576
Tejun Heo75b95952011-03-23 10:37:01 +01001577/**
1578 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1579 * @tsk: task reporting the state change
1580 * @for_ptracer: the notification is for ptracer
1581 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1582 *
1583 * Notify @tsk's parent that the stopped/continued state has changed. If
1584 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1585 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1586 *
1587 * CONTEXT:
1588 * Must be called with tasklist_lock at least read locked.
1589 */
1590static void do_notify_parent_cldstop(struct task_struct *tsk,
1591 bool for_ptracer, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592{
1593 struct siginfo info;
1594 unsigned long flags;
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001595 struct task_struct *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 struct sighand_struct *sighand;
1597
Tejun Heo75b95952011-03-23 10:37:01 +01001598 if (for_ptracer) {
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001599 parent = tsk->parent;
Tejun Heo75b95952011-03-23 10:37:01 +01001600 } else {
Oleg Nesterovbc505a42005-09-06 15:17:32 -07001601 tsk = tsk->group_leader;
1602 parent = tsk->real_parent;
1603 }
1604
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 info.si_signo = SIGCHLD;
1606 info.si_errno = 0;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001607 /*
1608 * see comment in do_notify_parent() abot the following 3 lines
1609 */
1610 rcu_read_lock();
Oleg Nesterovd9265662009-06-17 16:27:35 -07001611 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
David Howellsc69e8d92008-11-14 10:39:19 +11001612 info.si_uid = __task_cred(tsk)->uid;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001613 rcu_read_unlock();
1614
Michael Kerriskd8878ba2008-07-25 01:47:32 -07001615 info.si_utime = cputime_to_clock_t(tsk->utime);
1616 info.si_stime = cputime_to_clock_t(tsk->stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617
1618 info.si_code = why;
1619 switch (why) {
1620 case CLD_CONTINUED:
1621 info.si_status = SIGCONT;
1622 break;
1623 case CLD_STOPPED:
1624 info.si_status = tsk->signal->group_exit_code & 0x7f;
1625 break;
1626 case CLD_TRAPPED:
1627 info.si_status = tsk->exit_code & 0x7f;
1628 break;
1629 default:
1630 BUG();
1631 }
1632
1633 sighand = parent->sighand;
1634 spin_lock_irqsave(&sighand->siglock, flags);
1635 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1636 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1637 __group_send_sig_info(SIGCHLD, &info, parent);
1638 /*
1639 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1640 */
1641 __wake_up_parent(tsk, parent);
1642 spin_unlock_irqrestore(&sighand->siglock, flags);
1643}
1644
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001645static inline int may_ptrace_stop(void)
1646{
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001647 if (!likely(task_ptrace(current)))
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001648 return 0;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001649 /*
1650 * Are we in the middle of do_coredump?
1651 * If so and our tracer is also part of the coredump stopping
1652 * is a deadlock situation, and pointless because our tracer
1653 * is dead so don't allow us to stop.
1654 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001655 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001656 * is safe to enter schedule().
1657 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001658 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001659 unlikely(current->mm == current->parent->mm))
1660 return 0;
1661
1662 return 1;
1663}
1664
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665/*
Roland McGrath1a669c22008-02-06 01:37:37 -08001666 * Return nonzero if there is a SIGKILL that should be waking us up.
1667 * Called with the siglock held.
1668 */
1669static int sigkill_pending(struct task_struct *tsk)
1670{
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001671 return sigismember(&tsk->pending.signal, SIGKILL) ||
1672 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08001673}
1674
1675/*
Tejun Heoceb6bd62011-03-23 10:37:01 +01001676 * Test whether the target task of the usual cldstop notification - the
1677 * real_parent of @child - is in the same group as the ptracer.
1678 */
1679static bool real_parent_is_ptracer(struct task_struct *child)
1680{
1681 return same_thread_group(child->parent, child->real_parent);
1682}
1683
1684/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 * This must be called with current->sighand->siglock held.
1686 *
1687 * This should be the path for all ptrace stops.
1688 * We always set current->last_siginfo while stopped here.
1689 * That makes it a way to test a stopped process for
1690 * being ptrace-stopped vs being job-control-stopped.
1691 *
Oleg Nesterov20686a32008-02-08 04:19:03 -08001692 * If we actually decide not to stop at all because the tracer
1693 * is gone, we keep current->exit_code unless clear_code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 */
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001695static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
Namhyung Kimb8401152010-10-27 15:34:07 -07001696 __releases(&current->sighand->siglock)
1697 __acquires(&current->sighand->siglock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698{
Tejun Heoceb6bd62011-03-23 10:37:01 +01001699 bool gstop_done = false;
1700
Roland McGrath1a669c22008-02-06 01:37:37 -08001701 if (arch_ptrace_stop_needed(exit_code, info)) {
1702 /*
1703 * The arch code has something special to do before a
1704 * ptrace stop. This is allowed to block, e.g. for faults
1705 * on user stack pages. We can't keep the siglock while
1706 * calling arch_ptrace_stop, so we must release it now.
1707 * To preserve proper semantics, we must do this before
1708 * any signal bookkeeping like checking group_stop_count.
1709 * Meanwhile, a SIGKILL could come in before we retake the
1710 * siglock. That must prevent us from sleeping in TASK_TRACED.
1711 * So after regaining the lock, we must check for SIGKILL.
1712 */
1713 spin_unlock_irq(&current->sighand->siglock);
1714 arch_ptrace_stop(exit_code, info);
1715 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001716 if (sigkill_pending(current))
1717 return;
Roland McGrath1a669c22008-02-06 01:37:37 -08001718 }
1719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 /*
Tejun Heo0ae8ce12011-03-23 10:37:00 +01001721 * If @why is CLD_STOPPED, we're trapping to participate in a group
1722 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
1723 * while siglock was released for the arch hook, PENDING could be
1724 * clear now. We act as if SIGCONT is received after TASK_TRACED
1725 * is entered - ignore it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 */
Tejun Heo0ae8ce12011-03-23 10:37:00 +01001727 if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
Tejun Heoceb6bd62011-03-23 10:37:01 +01001728 gstop_done = task_participate_group_stop(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
1730 current->last_siginfo = info;
1731 current->exit_code = exit_code;
1732
Tejun Heod79fdd62011-03-23 10:37:00 +01001733 /*
1734 * TRACED should be visible before TRAPPING is cleared; otherwise,
1735 * the tracer might fail do_wait().
1736 */
1737 set_current_state(TASK_TRACED);
1738
1739 /*
1740 * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
1741 * transition to TASK_TRACED should be atomic with respect to
1742 * siglock. This hsould be done after the arch hook as siglock is
1743 * released and regrabbed across it.
1744 */
1745 task_clear_group_stop_trapping(current);
1746
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 spin_unlock_irq(&current->sighand->siglock);
1748 read_lock(&tasklist_lock);
Oleg Nesterov3d749b92008-07-25 01:47:37 -07001749 if (may_ptrace_stop()) {
Tejun Heoceb6bd62011-03-23 10:37:01 +01001750 /*
1751 * Notify parents of the stop.
1752 *
1753 * While ptraced, there are two parents - the ptracer and
1754 * the real_parent of the group_leader. The ptracer should
1755 * know about every stop while the real parent is only
1756 * interested in the completion of group stop. The states
1757 * for the two don't interact with each other. Notify
1758 * separately unless they're gonna be duplicates.
1759 */
1760 do_notify_parent_cldstop(current, true, why);
1761 if (gstop_done && !real_parent_is_ptracer(current))
1762 do_notify_parent_cldstop(current, false, why);
1763
Miklos Szeredi53da1d92009-03-23 16:07:24 +01001764 /*
1765 * Don't want to allow preemption here, because
1766 * sys_ptrace() needs this task to be inactive.
1767 *
1768 * XXX: implement read_unlock_no_resched().
1769 */
1770 preempt_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 read_unlock(&tasklist_lock);
Miklos Szeredi53da1d92009-03-23 16:07:24 +01001772 preempt_enable_no_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 schedule();
1774 } else {
1775 /*
1776 * By the time we got the lock, our tracer went away.
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08001777 * Don't drop the lock yet, another tracer may come.
Tejun Heoceb6bd62011-03-23 10:37:01 +01001778 *
1779 * If @gstop_done, the ptracer went away between group stop
1780 * completion and here. During detach, it would have set
1781 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
1782 * in do_signal_stop() on return, so notifying the real
1783 * parent of the group stop completion is enough.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 */
Tejun Heoceb6bd62011-03-23 10:37:01 +01001785 if (gstop_done)
1786 do_notify_parent_cldstop(current, false, why);
1787
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08001788 __set_current_state(TASK_RUNNING);
Oleg Nesterov20686a32008-02-08 04:19:03 -08001789 if (clear_code)
1790 current->exit_code = 0;
Oleg Nesterov6405f7f2008-02-08 04:19:00 -08001791 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 }
1793
1794 /*
Roland McGrath13b1c3d2008-03-03 20:22:05 -08001795 * While in TASK_TRACED, we were considered "frozen enough".
1796 * Now that we woke up, it's crucial if we're supposed to be
1797 * frozen that we freeze now before running anything substantial.
1798 */
1799 try_to_freeze();
1800
1801 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 * We are back. Now reacquire the siglock before touching
1803 * last_siginfo, so that we are sure to have synchronized with
1804 * any signal-sending on another CPU that wants to examine it.
1805 */
1806 spin_lock_irq(&current->sighand->siglock);
1807 current->last_siginfo = NULL;
1808
1809 /*
1810 * Queued signals ignored us while we were stopped for tracing.
1811 * So check for any that we should take before resuming user mode.
Roland McGrathb74d0de2007-06-06 03:59:00 -07001812 * This sets TIF_SIGPENDING, but never clears it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 */
Roland McGrathb74d0de2007-06-06 03:59:00 -07001814 recalc_sigpending_tsk(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815}
1816
1817void ptrace_notify(int exit_code)
1818{
1819 siginfo_t info;
1820
1821 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1822
1823 memset(&info, 0, sizeof info);
1824 info.si_signo = SIGTRAP;
1825 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001826 info.si_pid = task_pid_vnr(current);
David Howells76aac0e2008-11-14 10:39:12 +11001827 info.si_uid = current_uid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
1829 /* Let the debugger run. */
1830 spin_lock_irq(&current->sighand->siglock);
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001831 ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 spin_unlock_irq(&current->sighand->siglock);
1833}
1834
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835/*
1836 * This performs the stopping for SIGSTOP and other stop signals.
1837 * We have to stop all threads in the thread group.
1838 * Returns nonzero if we've actually stopped and released the siglock.
1839 * Returns zero if we didn't stop and still hold the siglock.
1840 */
Oleg Nesterova122b342006-03-28 16:11:22 -08001841static int do_signal_stop(int signr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842{
1843 struct signal_struct *sig = current->signal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Tejun Heo39efa3e2011-03-23 10:37:00 +01001845 if (!(current->group_stop & GROUP_STOP_PENDING)) {
1846 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
Oleg Nesterovf558b7e2008-02-04 22:27:24 -08001847 struct task_struct *t;
1848
Tejun Heod79fdd62011-03-23 10:37:00 +01001849 /* signr will be recorded in task->group_stop for retries */
1850 WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
1851
Oleg Nesterovee77f072011-04-01 20:12:38 +02001852 if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
Oleg Nesterov573cf9a2008-04-30 00:52:36 -07001853 unlikely(signal_group_exit(sig)))
Oleg Nesterovf558b7e2008-02-04 22:27:24 -08001854 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 /*
Tejun Heo408a37d2011-03-23 10:37:01 +01001856 * There is no group stop already in progress. We must
1857 * initiate one now.
1858 *
1859 * While ptraced, a task may be resumed while group stop is
1860 * still in effect and then receive a stop signal and
1861 * initiate another group stop. This deviates from the
1862 * usual behavior as two consecutive stop signals can't
Oleg Nesterov780006eac2011-04-01 20:12:16 +02001863 * cause two group stops when !ptraced. That is why we
1864 * also check !task_is_stopped(t) below.
Tejun Heo408a37d2011-03-23 10:37:01 +01001865 *
1866 * The condition can be distinguished by testing whether
1867 * SIGNAL_STOP_STOPPED is already set. Don't generate
1868 * group_exit_code in such case.
1869 *
1870 * This is not necessary for SIGNAL_STOP_CONTINUED because
1871 * an intervening stop signal is required to cause two
1872 * continued events regardless of ptrace.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 */
Tejun Heo408a37d2011-03-23 10:37:01 +01001874 if (!(sig->flags & SIGNAL_STOP_STOPPED))
1875 sig->group_exit_code = signr;
1876 else
1877 WARN_ON_ONCE(!task_ptrace(current));
Oleg Nesterova122b342006-03-28 16:11:22 -08001878
Tejun Heod79fdd62011-03-23 10:37:00 +01001879 current->group_stop &= ~GROUP_STOP_SIGMASK;
1880 current->group_stop |= signr | gstop;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001881 sig->group_stop_count = 1;
Tejun Heod79fdd62011-03-23 10:37:00 +01001882 for (t = next_thread(current); t != current;
1883 t = next_thread(t)) {
1884 t->group_stop &= ~GROUP_STOP_SIGMASK;
Oleg Nesterova122b342006-03-28 16:11:22 -08001885 /*
1886 * Setting state to TASK_STOPPED for a group
1887 * stop is always done with the siglock held,
1888 * so this check has no races.
1889 */
Tejun Heo39efa3e2011-03-23 10:37:00 +01001890 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
Tejun Heod79fdd62011-03-23 10:37:00 +01001891 t->group_stop |= signr | gstop;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001892 sig->group_stop_count++;
Oleg Nesterova122b342006-03-28 16:11:22 -08001893 signal_wake_up(t, 0);
Tejun Heod79fdd62011-03-23 10:37:00 +01001894 }
1895 }
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001896 }
Tejun Heod79fdd62011-03-23 10:37:00 +01001897retry:
Tejun Heo5224fa32011-03-23 10:37:00 +01001898 if (likely(!task_ptrace(current))) {
1899 int notify = 0;
1900
1901 /*
1902 * If there are no other threads in the group, or if there
1903 * is a group stop in progress and we are the last to stop,
1904 * report to the parent.
1905 */
1906 if (task_participate_group_stop(current))
1907 notify = CLD_STOPPED;
1908
Tejun Heod79fdd62011-03-23 10:37:00 +01001909 __set_current_state(TASK_STOPPED);
Tejun Heo5224fa32011-03-23 10:37:00 +01001910 spin_unlock_irq(&current->sighand->siglock);
1911
Tejun Heo62bcf9d2011-03-23 10:37:01 +01001912 /*
1913 * Notify the parent of the group stop completion. Because
1914 * we're not holding either the siglock or tasklist_lock
1915 * here, ptracer may attach inbetween; however, this is for
1916 * group stop and should always be delivered to the real
1917 * parent of the group leader. The new ptracer will get
1918 * its notification when this task transitions into
1919 * TASK_TRACED.
1920 */
Tejun Heo5224fa32011-03-23 10:37:00 +01001921 if (notify) {
1922 read_lock(&tasklist_lock);
Tejun Heo62bcf9d2011-03-23 10:37:01 +01001923 do_notify_parent_cldstop(current, false, notify);
Tejun Heo5224fa32011-03-23 10:37:00 +01001924 read_unlock(&tasklist_lock);
1925 }
1926
1927 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1928 schedule();
1929
1930 spin_lock_irq(&current->sighand->siglock);
Tejun Heod79fdd62011-03-23 10:37:00 +01001931 } else {
1932 ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
1933 CLD_STOPPED, 0, NULL);
1934 current->exit_code = 0;
1935 }
1936
1937 /*
1938 * GROUP_STOP_PENDING could be set if another group stop has
1939 * started since being woken up or ptrace wants us to transit
1940 * between TASK_STOPPED and TRACED. Retry group stop.
1941 */
1942 if (current->group_stop & GROUP_STOP_PENDING) {
1943 WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
1944 goto retry;
1945 }
1946
1947 /* PTRACE_ATTACH might have raced with task killing, clear trapping */
1948 task_clear_group_stop_trapping(current);
Tejun Heo5224fa32011-03-23 10:37:00 +01001949
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001950 spin_unlock_irq(&current->sighand->siglock);
1951
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001952 tracehook_finish_jctl();
Roland McGrathae6d2ed2009-09-23 15:56:53 -07001953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 return 1;
1955}
1956
Roland McGrath18c98b62008-04-17 18:44:38 -07001957static int ptrace_signal(int signr, siginfo_t *info,
1958 struct pt_regs *regs, void *cookie)
1959{
Oleg Nesterov5cb11442009-06-17 16:27:30 -07001960 if (!task_ptrace(current))
Roland McGrath18c98b62008-04-17 18:44:38 -07001961 return signr;
1962
1963 ptrace_signal_deliver(regs, cookie);
1964
1965 /* Let the debugger run. */
Tejun Heofe1bc6a2011-03-23 10:37:00 +01001966 ptrace_stop(signr, CLD_TRAPPED, 0, info);
Roland McGrath18c98b62008-04-17 18:44:38 -07001967
1968 /* We're back. Did the debugger cancel the sig? */
1969 signr = current->exit_code;
1970 if (signr == 0)
1971 return signr;
1972
1973 current->exit_code = 0;
1974
1975 /* Update the siginfo structure if the signal has
1976 changed. If the debugger wanted something
1977 specific in the siginfo structure then it should
1978 have updated *info via PTRACE_SETSIGINFO. */
1979 if (signr != info->si_signo) {
1980 info->si_signo = signr;
1981 info->si_errno = 0;
1982 info->si_code = SI_USER;
1983 info->si_pid = task_pid_vnr(current->parent);
David Howellsc69e8d92008-11-14 10:39:19 +11001984 info->si_uid = task_uid(current->parent);
Roland McGrath18c98b62008-04-17 18:44:38 -07001985 }
1986
1987 /* If the (new) signal is now blocked, requeue it. */
1988 if (sigismember(&current->blocked, signr)) {
1989 specific_send_sig_info(signr, info, current);
1990 signr = 0;
1991 }
1992
1993 return signr;
1994}
1995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1997 struct pt_regs *regs, void *cookie)
1998{
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07001999 struct sighand_struct *sighand = current->sighand;
2000 struct signal_struct *signal = current->signal;
2001 int signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
Roland McGrath13b1c3d2008-03-03 20:22:05 -08002003relock:
2004 /*
2005 * We'll jump back here after any time we were stopped in TASK_STOPPED.
2006 * While in TASK_STOPPED, we were considered "frozen enough".
2007 * Now that we woke up, it's crucial if we're supposed to be
2008 * frozen that we freeze now before running anything substantial.
2009 */
Rafael J. Wysockifc558a72006-03-23 03:00:05 -08002010 try_to_freeze();
2011
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002012 spin_lock_irq(&sighand->siglock);
Oleg Nesterov021e1ae2008-04-30 00:53:00 -07002013 /*
2014 * Every stopped thread goes here after wakeup. Check to see if
2015 * we should notify the parent, prepare_signal(SIGCONT) encodes
2016 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2017 */
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002018 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
Tejun Heo75b95952011-03-23 10:37:01 +01002019 struct task_struct *leader;
Tejun Heoc672af32011-03-23 10:36:59 +01002020 int why;
2021
2022 if (signal->flags & SIGNAL_CLD_CONTINUED)
2023 why = CLD_CONTINUED;
2024 else
2025 why = CLD_STOPPED;
2026
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002027 signal->flags &= ~SIGNAL_CLD_MASK;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002028
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002029 spin_unlock_irq(&sighand->siglock);
Oleg Nesterove4420552008-04-30 00:52:44 -07002030
Tejun Heoceb6bd62011-03-23 10:37:01 +01002031 /*
2032 * Notify the parent that we're continuing. This event is
2033 * always per-process and doesn't make whole lot of sense
2034 * for ptracers, who shouldn't consume the state via
2035 * wait(2) either, but, for backward compatibility, notify
2036 * the ptracer of the group leader too unless it's gonna be
2037 * a duplicate.
2038 */
Tejun Heoedf2ed12011-03-23 10:37:00 +01002039 read_lock(&tasklist_lock);
Tejun Heoceb6bd62011-03-23 10:37:01 +01002040
2041 do_notify_parent_cldstop(current, false, why);
2042
Tejun Heo75b95952011-03-23 10:37:01 +01002043 leader = current->group_leader;
Tejun Heoceb6bd62011-03-23 10:37:01 +01002044 if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
2045 do_notify_parent_cldstop(leader, true, why);
2046
Tejun Heoedf2ed12011-03-23 10:37:00 +01002047 read_unlock(&tasklist_lock);
Tejun Heoceb6bd62011-03-23 10:37:01 +01002048
Oleg Nesterove4420552008-04-30 00:52:44 -07002049 goto relock;
2050 }
2051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 for (;;) {
2053 struct k_sigaction *ka;
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002054 /*
2055 * Tracing can induce an artifical signal and choose sigaction.
2056 * The return value in @signr determines the default action,
2057 * but @info->si_signo is the signal number we will report.
2058 */
2059 signr = tracehook_get_signal(current, regs, info, return_ka);
2060 if (unlikely(signr < 0))
2061 goto relock;
2062 if (unlikely(signr != 0))
2063 ka = return_ka;
2064 else {
Tejun Heo39efa3e2011-03-23 10:37:00 +01002065 if (unlikely(current->group_stop &
2066 GROUP_STOP_PENDING) && do_signal_stop(0))
Oleg Nesterov1be53962009-12-15 16:47:26 -08002067 goto relock;
2068
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002069 signr = dequeue_signal(current, &current->blocked,
2070 info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
Roland McGrath18c98b62008-04-17 18:44:38 -07002072 if (!signr)
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002073 break; /* will return 0 */
2074
2075 if (signr != SIGKILL) {
2076 signr = ptrace_signal(signr, info,
2077 regs, cookie);
2078 if (!signr)
2079 continue;
2080 }
2081
2082 ka = &sighand->action[signr-1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 }
2084
Masami Hiramatsuf9d42572009-11-24 16:56:51 -05002085 /* Trace actually delivered signals. */
2086 trace_signal_deliver(signr, info, ka);
2087
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2089 continue;
2090 if (ka->sa.sa_handler != SIG_DFL) {
2091 /* Run the handler. */
2092 *return_ka = *ka;
2093
2094 if (ka->sa.sa_flags & SA_ONESHOT)
2095 ka->sa.sa_handler = SIG_DFL;
2096
2097 break; /* will return non-zero "signr" value */
2098 }
2099
2100 /*
2101 * Now we are doing the default action for this signal.
2102 */
2103 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2104 continue;
2105
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08002106 /*
Sukadev Bhattiprolu0fbc26a2007-10-18 23:40:13 -07002107 * Global init gets no signals it doesn't want.
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07002108 * Container-init gets no signals it doesn't want from same
2109 * container.
2110 *
2111 * Note that if global/container-init sees a sig_kernel_only()
2112 * signal here, the signal must have been generated internally
2113 * or must have come from an ancestor namespace. In either
2114 * case, the signal cannot be dropped.
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08002115 */
Oleg Nesterovfae5fa42008-04-30 00:53:03 -07002116 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07002117 !sig_kernel_only(signr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 continue;
2119
2120 if (sig_kernel_stop(signr)) {
2121 /*
2122 * The default action is to stop all threads in
2123 * the thread group. The job control signals
2124 * do nothing in an orphaned pgrp, but SIGSTOP
2125 * always works. Note that siglock needs to be
2126 * dropped during the call to is_orphaned_pgrp()
2127 * because of lock ordering with tasklist_lock.
2128 * This allows an intervening SIGCONT to be posted.
2129 * We need to check for that and bail out if necessary.
2130 */
2131 if (signr != SIGSTOP) {
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002132 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133
2134 /* signals can be posted during this window */
2135
Eric W. Biederman3e7cd6c2007-02-12 00:52:58 -08002136 if (is_current_pgrp_orphaned())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 goto relock;
2138
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002139 spin_lock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 }
2141
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002142 if (likely(do_signal_stop(info->si_signo))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 /* It released the siglock. */
2144 goto relock;
2145 }
2146
2147 /*
2148 * We didn't actually stop, due to a race
2149 * with SIGCONT or something like that.
2150 */
2151 continue;
2152 }
2153
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002154 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 /*
2157 * Anything else is fatal, maybe with a core dump.
2158 */
2159 current->flags |= PF_SIGNALED;
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002160
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 if (sig_kernel_coredump(signr)) {
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002162 if (print_fatal_signals)
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002163 print_fatal_signal(regs, info->si_signo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 /*
2165 * If it was able to dump core, this kills all
2166 * other threads in the group and synchronizes with
2167 * their demise. If we lost the race with another
2168 * thread getting here, it set group_exit_code
2169 * first and our do_group_exit call below will use
2170 * that value and ignore the one we pass it.
2171 */
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002172 do_coredump(info->si_signo, info->si_signo, regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 }
2174
2175 /*
2176 * Death signals, no core dump.
2177 */
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002178 do_group_exit(info->si_signo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 /* NOTREACHED */
2180 }
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002181 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 return signr;
2183}
2184
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002185void exit_signals(struct task_struct *tsk)
2186{
2187 int group_stop = 0;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002188 struct task_struct *t;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002189
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002190 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2191 tsk->flags |= PF_EXITING;
2192 return;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002193 }
2194
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002195 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002196 /*
2197 * From now this task is not visible for group-wide signals,
2198 * see wants_signal(), do_signal_stop().
2199 */
2200 tsk->flags |= PF_EXITING;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002201 if (!signal_pending(tsk))
2202 goto out;
2203
2204 /* It could be that __group_complete_signal() choose us to
2205 * notify about group-wide signal. Another thread should be
2206 * woken now to take the signal since we will not.
2207 */
2208 for (t = tsk; (t = next_thread(t)) != tsk; )
2209 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2210 recalc_sigpending_and_wake(t);
2211
Tejun Heo39efa3e2011-03-23 10:37:00 +01002212 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
Tejun Heoe5c19022011-03-23 10:37:00 +01002213 task_participate_group_stop(tsk))
Tejun Heoedf2ed12011-03-23 10:37:00 +01002214 group_stop = CLD_STOPPED;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002215out:
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002216 spin_unlock_irq(&tsk->sighand->siglock);
2217
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002218 /*
2219 * If group stop has completed, deliver the notification. This
2220 * should always go to the real parent of the group leader.
2221 */
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002222 if (unlikely(group_stop)) {
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002223 read_lock(&tasklist_lock);
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002224 do_notify_parent_cldstop(tsk, false, group_stop);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002225 read_unlock(&tasklist_lock);
2226 }
2227}
2228
/* Symbols exported to modules; sigprocmask and friends are part of the
 * long-standing kernel-internal signal API. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
2238
2239
2240/*
2241 * System call entry points.
2242 */
2243
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002244SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245{
2246 struct restart_block *restart = &current_thread_info()->restart_block;
2247 return restart->fn(restart);
2248}
2249
/*
 * Default restart_block handler for syscalls that must not be
 * transparently restarted: just report the interruption to the caller.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2254
2255/*
2256 * We don't need to get the kernel lock - this is all local to this
2257 * particular thread.. (and that's good, because this is _heavily_
2258 * used by various programs)
2259 */
2260
2261/*
2262 * This is also useful for kernel threads that want to temporarily
2263 * (or permanently) block certain signals.
2264 *
2265 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2266 * interface happily blocks "unblockable" signals like SIGKILL
2267 * and friends.
2268 */
2269int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2270{
2271 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272
2273 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterova26fd332006-03-23 03:00:49 -08002274 if (oldset)
2275 *oldset = current->blocked;
2276
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 error = 0;
2278 switch (how) {
2279 case SIG_BLOCK:
2280 sigorsets(&current->blocked, &current->blocked, set);
2281 break;
2282 case SIG_UNBLOCK:
2283 signandsets(&current->blocked, &current->blocked, set);
2284 break;
2285 case SIG_SETMASK:
2286 current->blocked = *set;
2287 break;
2288 default:
2289 error = -EINVAL;
2290 }
2291 recalc_sigpending();
2292 spin_unlock_irq(&current->sighand->siglock);
Oleg Nesterova26fd332006-03-23 03:00:49 -08002293
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 return error;
2295}
2296
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002297SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2298 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299{
2300 int error = -EINVAL;
2301 sigset_t old_set, new_set;
2302
2303 /* XXX: Don't preclude handling different sized sigset_t's. */
2304 if (sigsetsize != sizeof(sigset_t))
2305 goto out;
2306
2307 if (set) {
2308 error = -EFAULT;
2309 if (copy_from_user(&new_set, set, sizeof(*set)))
2310 goto out;
2311 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2312
2313 error = sigprocmask(how, &new_set, &old_set);
2314 if (error)
2315 goto out;
2316 if (oset)
2317 goto set_old;
2318 } else if (oset) {
2319 spin_lock_irq(&current->sighand->siglock);
2320 old_set = current->blocked;
2321 spin_unlock_irq(&current->sighand->siglock);
2322
2323 set_old:
2324 error = -EFAULT;
2325 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2326 goto out;
2327 }
2328 error = 0;
2329out:
2330 return error;
2331}
2332
2333long do_sigpending(void __user *set, unsigned long sigsetsize)
2334{
2335 long error = -EINVAL;
2336 sigset_t pending;
2337
2338 if (sigsetsize > sizeof(sigset_t))
2339 goto out;
2340
2341 spin_lock_irq(&current->sighand->siglock);
2342 sigorsets(&pending, &current->pending.signal,
2343 &current->signal->shared_pending.signal);
2344 spin_unlock_irq(&current->sighand->siglock);
2345
2346 /* Outside the lock because only this thread touches it. */
2347 sigandsets(&pending, &current->blocked, &pending);
2348
2349 error = -EFAULT;
2350 if (!copy_to_user(set, &pending, sigsetsize))
2351 error = 0;
2352
2353out:
2354 return error;
2355}
2356
/* Thin syscall wrapper: sigset size is validated inside do_sigpending(). */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
2361
2362#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2363
/*
 * copy_siginfo_to_user - copy a siginfo_t to userspace, field by field.
 *
 * A negative si_code means the siginfo originated from userspace, so the
 * whole structure is copied verbatim.  Kernel-generated siginfo is copied
 * selectively (3 generic ints plus the union member selected by the
 * __SI_* class) so that uninitialized padding never leaks to userspace.
 * Returns 0 on success or -EFAULT.
 */
int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
2433
2434#endif
2435
/*
 * sys_rt_sigtimedwait - synchronously wait for queued signals.
 *
 * @uthese: set of signals to wait for
 * @uinfo:  if non-NULL, receives the siginfo of the dequeued signal
 * @uts:    optional relative timeout; NULL means wait forever
 *
 * Returns the signal number on success, -EAGAIN on timeout, -EINTR if
 * interrupted by an unrelated signal, or -EINVAL/-EFAULT on bad input.
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	/* First try: a wanted signal may already be queued. */
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			/* +1 jiffy rounds a nonzero timeout up so we never
			 * sleep shorter than requested. */
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			/* Re-dequeue and restore the original mask; both must
			 * happen under the lock, in this order. */
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		/* timeout != 0 here means we slept and were woken early. */
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
2510
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002511SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512{
2513 struct siginfo info;
2514
2515 info.si_signo = sig;
2516 info.si_errno = 0;
2517 info.si_code = SI_USER;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002518 info.si_pid = task_tgid_vnr(current);
David Howells76aac0e2008-11-14 10:39:12 +11002519 info.si_uid = current_uid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520
2521 return kill_something_info(sig, &info, pid);
2522}
2523
/*
 * do_send_specific - send a signal to one specific task.
 *
 * Looks up @pid in the caller's namespace under RCU; if @tgid > 0 the
 * target must belong to that thread group, otherwise -ESRCH.
 * Returns 0 on success or a -errno from the permission check/delivery.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
2553
Thomas Gleixner30b4ae8a2009-04-04 21:01:01 +00002554static int do_tkill(pid_t tgid, pid_t pid, int sig)
2555{
2556 struct siginfo info;
2557
2558 info.si_signo = sig;
2559 info.si_errno = 0;
2560 info.si_code = SI_TKILL;
2561 info.si_pid = task_tgid_vnr(current);
2562 info.si_uid = current_uid();
2563
2564 return do_send_specific(tgid, pid, sig, &info);
2565}
2566
/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but it's not belonging to the target process anymore. This
 * method solves the problem of threads exiting and PIDs getting reused.
 *
 * Returns 0 on success, -EINVAL for non-positive @pid/@tgid, or a
 * -errno from permission checking or delivery.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
2585
/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 * Unlike tgkill(), no thread-group check is performed, so the signal
 * can land on a recycled PID; tgkill() is the race-free variant.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
2597
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002598SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2599 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600{
2601 siginfo_t info;
2602
2603 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2604 return -EFAULT;
2605
2606 /* Not even root can pretend to send signals from the kernel.
Julien Tinnesda485242011-03-18 15:05:21 -07002607 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2608 */
2609 if (info.si_code != SI_QUEUE) {
2610 /* We used to allow any < 0 si_code */
2611 WARN_ON_ONCE(info.si_code < 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 return -EPERM;
Julien Tinnesda485242011-03-18 15:05:21 -07002613 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 info.si_signo = sig;
2615
2616 /* POSIX.1b doesn't mention process groups. */
2617 return kill_proc_info(sig, &info, pid);
2618}
2619
Thomas Gleixner62ab4502009-04-04 21:01:06 +00002620long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2621{
2622 /* This is only valid for single tasks */
2623 if (pid <= 0 || tgid <= 0)
2624 return -EINVAL;
2625
2626 /* Not even root can pretend to send signals from the kernel.
Julien Tinnesda485242011-03-18 15:05:21 -07002627 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2628 */
2629 if (info->si_code != SI_QUEUE) {
2630 /* We used to allow any < 0 si_code */
2631 WARN_ON_ONCE(info->si_code < 0);
Thomas Gleixner62ab4502009-04-04 21:01:06 +00002632 return -EPERM;
Julien Tinnesda485242011-03-18 15:05:21 -07002633 }
Thomas Gleixner62ab4502009-04-04 21:01:06 +00002634 info->si_signo = sig;
2635
2636 return do_send_specific(tgid, pid, sig, info);
2637}
2638
/* Syscall wrapper: copy the siginfo in, then defer all validation and
 * delivery to do_rt_tgsigqueueinfo(). */
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
2649
/*
 * do_sigaction - examine and/or change the action for signal @sig.
 *
 * @sig:  signal number (1-based)
 * @act:  if non-NULL, the new action to install
 * @oact: if non-NULL, receives the previous action
 *
 * Returns 0 on success or -EINVAL for a bad signal number or an
 * attempt to change the action of SIGKILL/SIGSTOP.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		/* SIGKILL/SIGSTOP may never be masked during a handler. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Flush @sig from the shared queue and from every
			 * thread's private queue; the walk is safe under
			 * siglock. */
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
2694
/*
 * do_sigaltstack - examine and/or change the alternate signal stack.
 *
 * @uss:  if non-NULL, the new stack description to install
 * @uoss: if non-NULL, receives the previous stack description
 * @sp:   the caller's current stack pointer (for on-stack checks)
 *
 * The old settings are snapshotted into @oss *before* any update so a
 * combined get+set reports the pre-change state even when @uss and
 * @uoss alias.  Returns 0 or -EFAULT/-EPERM/-EINVAL/-ENOMEM.
 */
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		/* Changing the stack while executing on it is forbidden. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 *
		 * Note - this code used to test ss_flags incorrectly
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
2761
2762#ifdef __ARCH_WANT_SYS_SIGPENDING
2763
/* Legacy interface: reports only the first old_sigset_t worth of
 * pending signals. */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}
2768
2769#endif
2770
2771#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2772/* Some platforms have their own version with special arguments others
2773 support only sys_rt_sigprocmask. */
2774
Heiko Carstensb290ebe2009-01-14 14:14:06 +01002775SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2776 old_sigset_t __user *, oset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777{
2778 int error;
2779 old_sigset_t old_set, new_set;
2780
2781 if (set) {
2782 error = -EFAULT;
2783 if (copy_from_user(&new_set, set, sizeof(*set)))
2784 goto out;
2785 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2786
2787 spin_lock_irq(&current->sighand->siglock);
2788 old_set = current->blocked.sig[0];
2789
2790 error = 0;
2791 switch (how) {
2792 default:
2793 error = -EINVAL;
2794 break;
2795 case SIG_BLOCK:
2796 sigaddsetmask(&current->blocked, new_set);
2797 break;
2798 case SIG_UNBLOCK:
2799 sigdelsetmask(&current->blocked, new_set);
2800 break;
2801 case SIG_SETMASK:
2802 current->blocked.sig[0] = new_set;
2803 break;
2804 }
2805
2806 recalc_sigpending();
2807 spin_unlock_irq(&current->sighand->siglock);
2808 if (error)
2809 goto out;
2810 if (oset)
2811 goto set_old;
2812 } else if (oset) {
2813 old_set = current->blocked.sig[0];
2814 set_old:
2815 error = -EFAULT;
2816 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2817 goto out;
2818 }
2819 error = 0;
2820out:
2821 return error;
2822}
2823#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2824
2825#ifdef __ARCH_WANT_SYS_RT_SIGACTION
Heiko Carstensd4e82042009-01-14 14:14:34 +01002826SYSCALL_DEFINE4(rt_sigaction, int, sig,
2827 const struct sigaction __user *, act,
2828 struct sigaction __user *, oact,
2829 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830{
2831 struct k_sigaction new_sa, old_sa;
2832 int ret = -EINVAL;
2833
2834 /* XXX: Don't preclude handling different sized sigset_t's. */
2835 if (sigsetsize != sizeof(sigset_t))
2836 goto out;
2837
2838 if (act) {
2839 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2840 return -EFAULT;
2841 }
2842
2843 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2844
2845 if (!ret && oact) {
2846 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2847 return -EFAULT;
2848 }
2849out:
2850 return ret;
2851}
2852#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2853
2854#ifdef __ARCH_WANT_SYS_SGETMASK
2855
2856/*
2857 * For backwards compatibility. Functionality superseded by sigprocmask.
2858 */
/* Legacy: return the first word of the caller's blocked mask. */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}
2864
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002865SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866{
2867 int old;
2868
2869 spin_lock_irq(&current->sighand->siglock);
2870 old = current->blocked.sig[0];
2871
2872 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2873 sigmask(SIGSTOP)));
2874 recalc_sigpending();
2875 spin_unlock_irq(&current->sighand->siglock);
2876
2877 return old;
2878}
2879#endif /* __ARCH_WANT_SGETMASK */
2880
2881#ifdef __ARCH_WANT_SYS_SIGNAL
2882/*
2883 * For backwards compatibility. Functionality superseded by sigaction.
2884 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002885SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886{
2887 struct k_sigaction new_sa, old_sa;
2888 int ret;
2889
2890 new_sa.sa.sa_handler = handler;
2891 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d72006-02-09 22:41:41 +03002892 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893
2894 ret = do_sigaction(sig, &new_sa, &old_sa);
2895
2896 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2897}
2898#endif /* __ARCH_WANT_SYS_SIGNAL */
2899
2900#ifdef __ARCH_WANT_SYS_PAUSE
2901
/* Sleep until any signal is delivered; always "fails" with
 * -ERESTARTNOHAND so the handler return path reports -EINTR. */
SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
2908
2909#endif
2910
David Woodhouse150256d2006-01-18 17:43:57 -08002911#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/*
 * sys_rt_sigsuspend - atomically replace the blocked mask and sleep
 * until a signal is delivered.  The original mask is saved in
 * ->saved_sigmask and restored on the signal-handling return path via
 * set_restore_sigmask().
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	/* SIGKILL and SIGSTOP can never be blocked. */
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
2935#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2936
/* Weak default: architectures may override to name special VMAs
 * (e.g. the vDSO); the generic kernel has no names to offer. */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
2941
/* Boot-time setup: create the sigqueue slab; SLAB_PANIC because the
 * kernel cannot run without it. */
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05002946
2947#ifdef CONFIG_KGDB_KDB
2948#include <linux/kdb.h>
2949/*
2950 * kdb_send_sig_info - Allows kdb to send signals without exposing
2951 * signal internals. This function checks if the required locks are
2952 * available before calling the main signal code, to avoid kdb
2953 * deadlocks.
2954 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	/* Remembers the last target so the deadlock warning is only an
	 * advisory: repeating the command on the same task proceeds. */
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	/* Probe the lock only: if someone in the stopped kernel holds it,
	 * actually sending would deadlock. */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
2985#endif /* CONFIG_KGDB_KDB */