/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

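/*
 * Helpers to decide whether a signal is discarded outright:
 * __sig_ignored() checks only the handler slot, sig_ignored()
 * additionally rules out traced tasks and blocked signals.
 */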
static int __sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/* Is it explicitly or implicitly ignored? */

	handler = t->sighand->action[sig - 1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	return __sig_ignored(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers who know they
	 * should clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

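/*
 * Drain a pending queue: clear the pending set and free every queued
 * sigqueue entry.  Preallocated entries are skipped by __sigqueue_free()
 * above and remain owned by their POSIX timer.
 */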
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

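/*
 * Return nonzero if TSK would not handle SIG itself: global init takes
 * no signals it doesn't want, and a SIG_IGN/SIG_DFL disposition means no
 * user handler will run.  A ptraced task counts as handled, since the
 * tracer gets to see the signal.  (Callers such as arch fault handlers
 * use this, e.g. to decide whether a crashing access deserves a
 * diagnostic printk.)
 */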
int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

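/*
 * Dequeue the next signal in "pending" that is not blocked by "mask".
 * If the task installed a notifier via block_all_signals(), it gets a
 * chance to veto delivery of signals in its notifier_mask first.
 */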
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return -EPERM;

	return security_task_kill(t, info, sig, 0);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		signal->flags &= ~SIGNAL_STOP_DEQUEUED;
	}
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

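/*
 * Queue a signal on the given pending list.  Ignored signals and
 * already-pending legacy (non-RT) signals are dropped.  Everything else
 * gets a sigqueue entry when memory and RLIMIT_SIGPENDING permit; at a
 * minimum the bit in the pending mask is set.
 */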
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue *q;

	assert_spin_locked(&t->sighand->siglock);
	handle_stop_signal(sig, t);
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (sig_ignored(t, sig) || legacy_queue(signals, sig))
		return 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return 1;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

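/*
 * Send a signal to a single thread's private pending queue.  The caller
 * must hold ->siglock; the target is woken unless it has the signal
 * blocked.
 */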
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret;

	ret = send_signal(sig, info, t, &t->pending);
	if (ret <= 0)
		return ret;

	if (!sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
	return 0;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want a handler that was blocked to be invoked
 * after user space explicitly blocked the signal.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

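/*
 * A group signal has just been queued on the shared pending list; pick a
 * thread to take it, and short-circuit group-fatal signals by starting
 * the whole-group exit right here.
 */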
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (ret <= 0)
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

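/*
 * Pin down tsk->sighand and take its siglock.  ->sighand can change
 * under us (de_thread(), release_task()), so re-check after taking the
 * lock and retry if it moved.  Returns NULL when the task no longer has
 * a sighand.
 */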
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

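/*
 * Send a signal to a whole thread group, after checking permissions.  A
 * zero sig is only a permission and existence probe, so delivery is
 * skipped in that case.
 */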
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure in timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

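/*
 * Common helper for send_sigqueue()/send_group_sigqueue(): queue a
 * preallocated sigqueue entry on the given pending list.  An SI_TIMER
 * entry that is still queued only gets its overrun count bumped.
 */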
static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
			    struct sigpending *pending)
{
	handle_stop_signal(sig, t);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */

		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		return 0;
	}

	if (sig_ignored(t, sig))
		return 1;

	signalfd_notify(t, sig);
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	return 0;
}

int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = -1;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	if (!likely(lock_task_sighand(p, &flags)))
		return ret;

	ret = do_send_sigqueue(sig, q, p, &p->pending);

	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

	unlock_task_sighand(p, &flags);

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);

	ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);

	__group_complete_signal(sig, p);

	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

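/*
 * A signal was dequeued while we are ptraced: stop and let the debugger
 * see it first.  The debugger may cancel the signal, replace it with
 * another one, or leave it alone.
 */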
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);

	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, &current->blocked, info);
		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (is_global_init(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}

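/*
 * Called by the exit path.  Mark the task PF_EXITING so group-wide
 * signals skip it, and hand off any group-wide duties (shared pending
 * signals, an in-progress group stop) that this thread can no longer
 * perform.
 */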
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in so that we'll be awakened when
			 * they arrive. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

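/*
 * Query and/or install the alternate signal stack (sigaltstack(2)).
 * "sp" is the current user stack pointer; it is used both to report
 * whether we are running on the old stack and to refuse replacing a
 * stack that is currently in use.
 */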
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

2411 sys_sigpending(old_sigset_t __user *set)
2413 return do_sigpending(set, sizeof(*set));
2418 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2419 /* Some platforms have their own version with special arguments others
2420 support only sys_rt_sigprocmask. */
2423 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2426 old_sigset_t old_set, new_set;
2430 if (copy_from_user(&new_set, set, sizeof(*set)))
2432 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2434 spin_lock_irq(¤t->sighand->siglock);
2435 old_set = current->blocked.sig[0];
2443 sigaddsetmask(¤t->blocked, new_set);
2446 sigdelsetmask(¤t->blocked, new_set);
2449 current->blocked.sig[0] = new_set;
2453 recalc_sigpending();
2454 spin_unlock_irq(¤t->sighand->siglock);
2460 old_set = current->blocked.sig[0];
2463 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2470 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}