/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	Changes to use preallocated sigqueue structures
 *	to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int __sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/* Is it explicitly or implicitly ignored? */

	handler = t->sighand->action[sig - 1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	return __sig_ignored(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
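/*
 * Illustrative sketch (not part of the kernel build): the word-wise
 * "pending &~ blocked" test above, reduced to plain C on one example
 * mask word. The bit values are hypothetical; the block is compiled
 * out via #if 0.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* sigmask(sig) is 1UL << (sig - 1): here signals 1 and 17. */
	unsigned long pending = (1UL << 0) | (1UL << 16);
	unsigned long blocked = (1UL << 16);

	/* A signal is deliverable iff it is pending and not blocked. */
	unsigned long ready = pending & ~blocked;

	printf("ready mask: %#lx\n", ready);	/* only bit 0 survives */
	return 0;
}
#endif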
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
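/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * notifier that holds signals back while a device operation is in
 * flight. "my_dev" and its "busy" flag are invented for illustration;
 * note the mask must stay alive while the block is installed. Compiled
 * out via #if 0.
 */
#if 0
struct my_dev {
	int busy;
};

static int my_sig_notifier(void *priv)
{
	struct my_dev *dev = priv;

	/* Non-zero return: deliver the signal after all. */
	return !dev->busy;
}

static void my_dev_start(struct my_dev *dev, sigset_t *mask)
{
	sigfillset(mask);
	dev->busy = 1;
	block_all_signals(my_sig_notifier, dev, mask);
}

static void my_dev_finish(struct my_dev *dev)
{
	dev->busy = 0;
	unblock_all_signals();
}
#endif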
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info. */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
		error = audit_signal_info(sig, t); /* Let audit system see the signal */
		if (error)
			return error;
		error = -EPERM;
		if (((sig != SIGCONT) ||
		     (task_session_nr(current) != task_session_nr(t)))
		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
		    && !capable(CAP_KILL))
			return error;
	}

	return security_task_kill(t, info, sig, 0);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;

	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (sig_ignored(t, sig) || legacy_queue(signals, sig))
		return 0;

	/*
	 * Deliver the signal to listening signalfds.  This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return 0;
}
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
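/*
 * Usage note: the __setup() hook above means the fatal-signal dump is
 * enabled by booting with "print-fatal-signals=1" on the kernel
 * command line.
 */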
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	ret = send_signal(sig, info, t, &t->pending);
	if (ret < 0)
		return ret;

	if (!sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}
	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return error;
}
static int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
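/*
 * Userspace illustration (not kernel code) of the pid conventions
 * handled above, per kill(2): pid > 0 targets one process, pid == 0
 * the caller's own process group, pid < -1 the group |pid|, and
 * pid == -1 broadcasts. The pids are hypothetical; compiled out via
 * #if 0.
 */
#if 0
#include <signal.h>
#include <sys/types.h>

int main(void)
{
	kill(1234, SIGTERM);	/* one specific process */
	kill(0, SIGTERM);	/* every process in our process group */
	kill(-5678, SIGTERM);	/* every process in process group 5678 */
	return 0;
}
#endif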
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
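/*
 * Userspace sketch (not kernel code) of the preallocation contract
 * described above: timer_create() is where the sigqueue entry is
 * allocated, so resource exhaustion surfaces there as EAGAIN rather
 * than at expiry time. Compiled out via #if 0; link with -lrt.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct sigevent sev;
	timer_t timerid;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;

	/* The kernel preallocates the signal queue entry here ... */
	if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0) {
		perror("timer_create");	/* ... so EAGAIN is reported here */
		return 1;
	}
	timer_delete(timerid);
	return 0;
}
#endif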
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is to switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;
	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}
	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}
	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}
		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (is_global_init(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);
		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 *
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
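/*
 * Userspace sketch (not kernel code) of the same three SIG_* modes as
 * seen through sigprocmask(2). Compiled out via #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* or into the blocked mask */
	raise(SIGINT);				/* stays pending, not delivered */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore: SIGINT fires here */

	printf("not reached: default SIGINT action terminated us above\n");
	return 0;
}
#endif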
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in, so that we'll be awakened when
			 * they arrive while we are sleeping.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
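/*
 * Userspace sketch (not kernel code): waiting for SIGUSR1 with a
 * timeout via sigtimedwait(2), which lands in the syscall above.
 * Compiled out via #if 0.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* The signal must be blocked so it queues instead of firing. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &ts) < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout */
	else
		printf("got signal %d from pid %d\n",
		       info.si_signo, (int)info.si_pid);
	return 0;
}
#endif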
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process. This
 * method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
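/*
 * Userspace sketch (not kernel code): older glibc exposes no tgkill()
 * wrapper, so it is commonly invoked via syscall(2). Signal 0 is the
 * existence probe described in do_tkill() above. Compiled out via
 * #if 0.
 */
#if 0
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = (pid_t) syscall(SYS_gettid);

	/* Probe one specific thread of one specific process. */
	if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
		printf("thread %d of process %d exists\n", tid, tgid);
	return 0;
}
#endif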
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *  	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
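/*
 * Userspace sketch (not kernel code): installing an alternate stack
 * with sigaltstack(2) so a SIGSEGV handler can still run after a
 * stack overflow. Compiled out via #if 0.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_DFL;	/* a real handler would go here */
	sa.sa_flags = SA_ONSTACK;	/* run the handler on ss.ss_sp */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}
#endif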
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments others
   support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
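/*
 * Userspace sketch (not kernel code): the SA_ONESHOT | SA_NOMASK pair
 * above gives this syscall its historical System V semantics, where
 * the disposition resets to SIG_DFL after one delivery. Note glibc's
 * signal() wraps sigaction() with BSD semantics instead, so the
 * one-shot behaviour is shown via the glibc-specific sysv_signal().
 * Compiled out via #if 0.
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>

static void on_usr1(int sig)
{
	(void)sig;	/* after this returns, disposition is SIG_DFL again */
}

int main(void)
{
	sysv_signal(SIGUSR1, on_usr1);
	raise(SIGUSR1);	/* runs on_usr1, then the handler is reset */
	raise(SIGUSR1);	/* default action now: terminates the process */
	printf("not reached under one-shot semantics\n");
	return 0;
}
#endif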
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}