/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	      Changes to use preallocated sigqueue structures
 *	      to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int __sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig - 1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

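/*
 * Illustration (user-space sketch, not part of the kernel): the
 * __sig_ignored() rule as seen from user space.  SIGCHLD is in
 * sig_kernel_ignore(), so with the default disposition it is simply
 * discarded at generation time, while an explicit handler makes it
 * deliverable.  A hypothetical probe:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t got;
 *	static void handler(int sig) { got = sig; }
 *
 *	int main(void)
 *	{
 *		kill(getpid(), SIGCHLD);
 *		printf("after SIG_DFL:  got=%d\n", (int)got);
 *		signal(SIGCHLD, handler);
 *		kill(getpid(), SIGCHLD);
 *		printf("after handler:  got=%d\n", (int)got);
 *		return 0;
 *	}
 *
 * The first kill() is dropped (got stays 0); the second runs the handler.
 */
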
static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the signal
	 * handler may change by the time it is unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	return __sig_ignored(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;
	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;
	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;
	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and leave that to callers who know
	 * they should.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;
	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;
	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}
	return sig;
}

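/*
 * Illustration (plain C sketch, not kernel code): the scan above finds the
 * lowest set bit of "pending & ~blocked", one word at a time.  ffz(~x) is
 * the index of the lowest set bit of x; in portable C the same computation
 * can be written with the GCC/Clang builtin __builtin_ctzl:
 *
 *	unsigned long w0 = pending & ~blocked;
 *	int sig = w0 ? __builtin_ctzl(w0) + 1 : 0;
 *
 * For example, pending = 0x104 with blocked = 0x004 leaves w0 = 0x100, so
 * sig = 9, i.e. SIGKILL on the usual numbering.
 */
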
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

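/*
 * Illustration (user-space sketch, not kernel code): the sigpending rlimit
 * checked in __sigqueue_alloc() is the one a process can hit via
 * sigqueue(3).  A hypothetical probe, assuming no other signals are
 * already queued for this user:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl = { 4, 4 };
 *		union sigval v = { .sival_int = 0 };
 *		sigset_t set;
 *		int i;
 *
 *		setrlimit(RLIMIT_SIGPENDING, &rl);
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGRTMIN);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		for (i = 0; i < 8; i++)
 *			if (sigqueue(getpid(), SIGRTMIN, v) < 0) {
 *				printf("queue full after %d: %s\n",
 *				       i, strerror(errno));
 *				break;
 *			}
 *		return 0;
 *	}
 *
 * With the limit set to 4, the fifth sigqueue() fails with EAGAIN, which
 * is __sigqueue_alloc() above refusing the allocation.
 */
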
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}

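/*
 * Illustration (user-space sketch, not kernel code): the CLD_CONTINUED
 * bookkeeping above is what makes waitpid(..., WCONTINUED) work.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();
 *		int status;
 *
 *		if (pid == 0)
 *			for (;;)
 *				pause();
 *		kill(pid, SIGSTOP);
 *		waitpid(pid, &status, WUNTRACED);
 *		printf("stopped: %d\n", WIFSTOPPED(status));
 *		kill(pid, SIGCONT);
 *		waitpid(pid, &status, WCONTINUED);
 *		printf("continued: %d\n", WIFCONTINUED(status));
 *		kill(pid, SIGKILL);
 *		waitpid(pid, &status, 0);
 *		return 0;
 *	}
 *
 * prepare_signal(SIGCONT) wakes the stopped child and records the state
 * change that the second waitpid() observes.
 */
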
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

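/*
 * Illustration (user-space sketch, not kernel code): legacy_queue() is why
 * classic signals coalesce while realtime signals accumulate.  Send each
 * three times while blocked, then count deliveries:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static int count_pending(int sig)
 *	{
 *		struct timespec ts = { 0, 0 };
 *		sigset_t set;
 *		int n = 0;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, sig);
 *		while (sigtimedwait(&set, NULL, &ts) == sig)
 *			n++;
 *		return n;
 *	}
 *
 *	int main(void)
 *	{
 *		union sigval v = { .sival_int = 0 };
 *		sigset_t both;
 *		int i;
 *
 *		sigemptyset(&both);
 *		sigaddset(&both, SIGUSR1);
 *		sigaddset(&both, SIGRTMIN);
 *		sigprocmask(SIG_BLOCK, &both, NULL);
 *		for (i = 0; i < 3; i++) {
 *			sigqueue(getpid(), SIGUSR1, v);
 *			sigqueue(getpid(), SIGRTMIN, v);
 *		}
 *		printf("SIGUSR1: %d, SIGRTMIN: %d\n",
 *		       count_pending(SIGUSR1), count_pending(SIGRTMIN));
 *		return 0;
 *	}
 *
 * This prints "SIGUSR1: 1, SIGRTMIN: 3": the second and third SIGUSR1 hit
 * the sigismember() test above and are dropped.
 */
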
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;
	struct sigqueue *q;

	assert_spin_locked(&t->sighand->siglock);
	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or kill().
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

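/*
 * Illustration (user-space sketch, not kernel code): the preallocation
 * above is what timer_create(2) relies on.  If no sigqueue can be
 * reserved at creation time the failure is reported up front as EAGAIN,
 * instead of the expiry signal being lost later:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGRTMIN,
 *		};
 *		timer_t id;
 *
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &id) < 0)
 *			perror("timer_create");
 *		return 0;
 *	}
 *
 * On older glibc this needs -lrt; the perror() path is where an EAGAIN
 * from a failed sigqueue_alloc() would surface.
 */
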
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

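/*
 * Illustration (user-space sketch, not kernel code): the exit_signal = -1
 * path above is the POSIX auto-reap behaviour visible from user space.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		signal(SIGCHLD, SIG_IGN);
 *		if (fork() == 0)
 *			_exit(0);
 *		sleep(1);
 *		if (wait(NULL) < 0 && errno == ECHILD)
 *			printf("child was reaped automatically\n");
 *		return 0;
 *	}
 *
 * The child never becomes a zombie, and a blocked wait() returns -ECHILD,
 * exactly as the comment above describes.
 */
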
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

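/*
 * Illustration (user-space sketch, not kernel code): the canonical pair to
 * the ptrace_stop() path above.  The tracee traps itself; the tracer sees
 * the stop via waitpid() and resumes it:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();
 *		int status;
 *
 *		if (pid == 0) {
 *			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *			raise(SIGSTOP);
 *			_exit(0);
 *		}
 *		waitpid(pid, &status, 0);
 *		printf("tracee stopped with signal %d\n", WSTOPSIG(status));
 *		ptrace(PTRACE_CONT, pid, NULL, 0);
 *		waitpid(pid, &status, 0);
 *		return 0;
 *	}
 *
 * While the child sits in that stop, it is in TASK_TRACED with
 * last_siginfo set, which is exactly the state ptrace_stop() arranges.
 */
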
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
					 != SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, &current->blocked, info);
		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, signr);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

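/*
 * Illustration (user-space sketch, not kernel code): unlike this kernel
 * interface, the syscall path strips SIGKILL/SIGSTOP from the new mask
 * (see sys_rt_sigprocmask() below), so blocking them from user space is
 * silently a no-op:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, cur;
 *
 *		sigfillset(&set);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		sigprocmask(SIG_BLOCK, NULL, &cur);
 *		printf("SIGKILL blocked? %d\n", sigismember(&cur, SIGKILL));
 *		printf("SIGTERM blocked? %d\n", sigismember(&cur, SIGTERM));
 *		return 0;
 *	}
 *
 * This prints 0 for SIGKILL and 1 for SIGTERM.
 */
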
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

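/*
 * Illustration (user-space sketch, not kernel code): typical use of the
 * syscall above, via sigtimedwait(2) -- block a signal, then wait for it
 * synchronously with a timeout instead of taking it in a handler:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { 2, 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		kill(getpid(), SIGUSR1);
 *		if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *			printf("got SIGUSR1 from pid %d\n", (int)si.si_pid);
 *		return 0;
 *	}
 *
 * With nothing pending the call would instead return after two seconds
 * with EAGAIN, matching the -EAGAIN/-EINTR tail above.
 */
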
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;
	unsigned long flags;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 *
		 * If lock_task_sighand() fails we pretend the task dies
		 * after receiving the signal. The window is tiny, and the
		 * signal is private anyway.
		 */
		if (!error && sig && lock_task_sighand(p, &flags)) {
			error = specific_send_sig_info(sig, &info, p);
			unlock_task_sighand(p, &flags);
		}
	}
	rcu_read_unlock();

	return error;
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

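/*
 * Illustration (user-space sketch, not kernel code): the POSIX discard
 * rule implemented above, observed with sigpending():
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, pend;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		kill(getpid(), SIGUSR1);
 *		sigpending(&pend);
 *		printf("pending before: %d\n", sigismember(&pend, SIGUSR1));
 *		signal(SIGUSR1, SIG_IGN);
 *		sigpending(&pend);
 *		printf("pending after:  %d\n", sigismember(&pend, SIGUSR1));
 *		return 0;
 *	}
 *
 * Setting SIG_IGN flushes the queued-but-blocked SIGUSR1 via
 * rm_from_queue_full() above, so this prints 1 then 0.
 */
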
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

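/*
 * Illustration (user-space sketch, not kernel code): the saved_sigmask
 * dance above is what makes sigsuspend(2) an atomic "unblock and sleep",
 * closing the window between checking a flag and calling pause():
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t seen;
 *	static void handler(int sig) { seen = sig; }
 *
 *	int main(void)
 *	{
 *		sigset_t block, waitmask;
 *
 *		signal(SIGUSR1, handler);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &waitmask);
 *
 *		kill(getpid(), SIGUSR1);
 *		while (!seen)
 *			sigsuspend(&waitmask);
 *		printf("woken by signal %d\n", (int)seen);
 *		return 0;
 *	}
 *
 * sigsuspend() installs waitmask (with SIGUSR1 unblocked), sleeps, runs
 * the handler, and restores the old mask before returning EINTR, which is
 * the saved_sigmask/set_restore_sigmask() sequence above.
 */
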
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}