X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fsignal.c;h=dbd7fe073c558dbc57080e7d0d9344b2cf47b2dd;hb=cac36f707119b792b2396aed371d6b5cdc194890;hp=ec81defde33946380518215cab07e309876360ed;hpb=d3228a887cae75ef2b8b1211c31c539bef5a5698;p=safe%2Fjmp%2Flinux-2.6 diff --git a/kernel/signal.c b/kernel/signal.c index ec81def..dbd7fe0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -22,8 +21,16 @@ #include #include #include +#include +#include +#include #include #include +#include +#include +#define CREATE_TRACE_POINTS +#include + #include #include #include @@ -36,148 +43,51 @@ static struct kmem_cache *sigqueue_cachep; -/* - * In POSIX a signal is sent either to a specific thread (Linux task) - * or to the process as a whole (Linux thread group). How the signal - * is sent determines whether it's to one thread or the whole group, - * which determines which signal mask(s) are involved in blocking it - * from being delivered until later. When the signal is delivered, - * either it's caught or ignored by a user handler or it has a default - * effect that applies to the whole thread group (POSIX process). - * - * The possible effects an unblocked signal set to SIG_DFL can have are: - * ignore - Nothing Happens - * terminate - kill the process, i.e. all threads in the group, - * similar to exit_group. The group leader (only) reports - * WIFSIGNALED status to its parent. - * coredump - write a core dump file describing all threads using - * the same mm and then kill all those threads - * stop - stop all the threads in the group, i.e. TASK_STOPPED state - * - * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. - * Other signals when not blocked and set to SIG_DFL behaves as follows. - * The job control signals also have other special effects. - * - * +--------------------+------------------+ - * | POSIX signal | default action | - * +--------------------+------------------+ - * | SIGHUP | terminate | - * | SIGINT | terminate | - * | SIGQUIT | coredump | - * | SIGILL | coredump | - * | SIGTRAP | coredump | - * | SIGABRT/SIGIOT | coredump | - * | SIGBUS | coredump | - * | SIGFPE | coredump | - * | SIGKILL | terminate(+) | - * | SIGUSR1 | terminate | - * | SIGSEGV | coredump | - * | SIGUSR2 | terminate | - * | SIGPIPE | terminate | - * | SIGALRM | terminate | - * | SIGTERM | terminate | - * | SIGCHLD | ignore | - * | SIGCONT | ignore(*) | - * | SIGSTOP | stop(*)(+) | - * | SIGTSTP | stop(*) | - * | SIGTTIN | stop(*) | - * | SIGTTOU | stop(*) | - * | SIGURG | ignore | - * | SIGXCPU | coredump | - * | SIGXFSZ | coredump | - * | SIGVTALRM | terminate | - * | SIGPROF | terminate | - * | SIGPOLL/SIGIO | terminate | - * | SIGSYS/SIGUNUSED | coredump | - * | SIGSTKFLT | terminate | - * | SIGWINCH | ignore | - * | SIGPWR | terminate | - * | SIGRTMIN-SIGRTMAX | terminate | - * +--------------------+------------------+ - * | non-POSIX signal | default action | - * +--------------------+------------------+ - * | SIGEMT | coredump | - * +--------------------+------------------+ - * - * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". - * (*) Special job control effects: - * When SIGCONT is sent, it resumes the process (all threads in the group) - * from TASK_STOPPED state and also clears any pending/queued stop signals - * (any of those marked with "stop(*)"). This happens regardless of blocking, - * catching, or ignoring SIGCONT. 
When any stop signal is sent, it clears - * any pending/queued SIGCONT signals; this happens regardless of blocking, - * catching, or ignored the stop signal, though (except for SIGSTOP) the - * default action of stopping the process may happen later or never. - */ - -#ifdef SIGEMT -#define M_SIGEMT M(SIGEMT) -#else -#define M_SIGEMT 0 -#endif - -#if SIGRTMIN > BITS_PER_LONG -#define M(sig) (1ULL << ((sig)-1)) -#else -#define M(sig) (1UL << ((sig)-1)) -#endif -#define T(sig, mask) (M(sig) & (mask)) - -#define SIG_KERNEL_ONLY_MASK (\ - M(SIGKILL) | M(SIGSTOP) ) +int print_fatal_signals __read_mostly; -#define SIG_KERNEL_STOP_MASK (\ - M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) - -#define SIG_KERNEL_COREDUMP_MASK (\ - M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ - M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ - M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) +static void __user *sig_handler(struct task_struct *t, int sig) +{ + return t->sighand->action[sig - 1].sa.sa_handler; +} -#define SIG_KERNEL_IGNORE_MASK (\ - M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) +static int sig_handler_ignored(void __user *handler, int sig) +{ + /* Is it explicitly or implicitly ignored? */ + return handler == SIG_IGN || + (handler == SIG_DFL && sig_kernel_ignore(sig)); +} -#define sig_kernel_only(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) -#define sig_kernel_coredump(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) -#define sig_kernel_ignore(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) -#define sig_kernel_stop(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) +static int sig_task_ignored(struct task_struct *t, int sig, + int from_ancestor_ns) +{ + void __user *handler; -#define sig_needs_tasklist(sig) ((sig) == SIGCONT) + handler = sig_handler(t, sig); -#define sig_user_defined(t, signr) \ - (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ - ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) + if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && + handler == SIG_DFL && !from_ancestor_ns) + return 1; -#define sig_fatal(t, signr) \ - (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ - (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) + return sig_handler_ignored(handler, sig); +} -static int sig_ignored(struct task_struct *t, int sig) +static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) { - void __user * handler; - - /* - * Tracers always want to know about signals.. - */ - if (t->ptrace & PT_PTRACED) - return 0; - /* * Blocked signals are never ignored, since the * signal handler may change by the time it is * unblocked. */ - if (sigismember(&t->blocked, sig)) + if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) return 0; - /* Is it explicitly or implicitly ignored? */ - handler = t->sighand->action[sig-1].sa.sa_handler; - return handler == SIG_IGN || - (handler == SIG_DFL && sig_kernel_ignore(sig)); + if (!sig_task_ignored(t, sig, from_ancestor_ns)) + return 0; + + /* + * Tracers may want to know about even ignored signals. 
+ */ + return !tracehook_consider_ignored_signal(t, sig); } /* @@ -212,83 +122,145 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) -fastcall void recalc_sigpending_tsk(struct task_struct *t) +static int recalc_sigpending_tsk(struct task_struct *t) { if (t->signal->group_stop_count > 0 || - (freezing(t)) || PENDING(&t->pending, &t->blocked) || - PENDING(&t->signal->shared_pending, &t->blocked)) + PENDING(&t->signal->shared_pending, &t->blocked)) { set_tsk_thread_flag(t, TIF_SIGPENDING); - else - clear_tsk_thread_flag(t, TIF_SIGPENDING); + return 1; + } + /* + * We must never clear the flag in another thread, or in current + * when it's possible the current syscall is returning -ERESTART*. + * So we don't clear it here, and only callers who know they should do. + */ + return 0; +} + +/* + * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. + * This is superfluous when called on current, the wakeup is a harmless no-op. + */ +void recalc_sigpending_and_wake(struct task_struct *t) +{ + if (recalc_sigpending_tsk(t)) + signal_wake_up(t, 0); } void recalc_sigpending(void) { - recalc_sigpending_tsk(current); + if (unlikely(tracehook_force_sigpending())) + set_thread_flag(TIF_SIGPENDING); + else if (!recalc_sigpending_tsk(current) && !freezing(current)) + clear_thread_flag(TIF_SIGPENDING); + } /* Given the mask, find the first available signal that should be serviced. */ -static int -next_signal(struct sigpending *pending, sigset_t *mask) +#define SYNCHRONOUS_MASK \ + (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ + sigmask(SIGTRAP) | sigmask(SIGFPE)) + +int next_signal(struct sigpending *pending, sigset_t *mask) { unsigned long i, *s, *m, x; int sig = 0; - + s = pending->signal.sig; m = mask->sig; + + /* + * Handle the first word specially: it contains the + * synchronous signals that need to be dequeued first. 
+ */ + x = *s &~ *m; + if (x) { + if (x & SYNCHRONOUS_MASK) + x &= SYNCHRONOUS_MASK; + sig = ffz(~x) + 1; + return sig; + } + switch (_NSIG_WORDS) { default: - for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) - if ((x = *s &~ *m) != 0) { - sig = ffz(~x) + i*_NSIG_BPW + 1; - break; - } + for (i = 1; i < _NSIG_WORDS; ++i) { + x = *++s &~ *++m; + if (!x) + continue; + sig = ffz(~x) + i*_NSIG_BPW + 1; + break; + } break; - case 2: if ((x = s[0] &~ m[0]) != 0) - sig = 1; - else if ((x = s[1] &~ m[1]) != 0) - sig = _NSIG_BPW + 1; - else + case 2: + x = s[1] &~ m[1]; + if (!x) break; - sig += ffz(~x); + sig = ffz(~x) + _NSIG_BPW + 1; break; - case 1: if ((x = *s &~ *m) != 0) - sig = ffz(~x) + 1; + case 1: + /* Nothing to do */ break; } - + return sig; } -static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, - int override_rlimit) +static inline void print_dropped_signal(int sig) +{ + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); + + if (!print_fatal_signals) + return; + + if (!__ratelimit(&ratelimit_state)) + return; + + printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", + current->comm, current->pid, sig); +} + +/* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appopriate lock must be held to stop the target task from exiting + */ +static struct sigqueue * +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) { struct sigqueue *q = NULL; struct user_struct *user; /* - * In order to avoid problems with "switch_user()", we want to make - * sure that the compiler doesn't re-load "t->user" + * Protect access to @t credentials. This can go away when all + * callers hold rcu read lock. */ - user = t->user; - barrier(); + rcu_read_lock(); + user = get_uid(__task_cred(t)->user); atomic_inc(&user->sigpending); + rcu_read_unlock(); + if (override_rlimit || atomic_read(&user->sigpending) <= - t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) + task_rlimit(t, RLIMIT_SIGPENDING)) { q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } + if (unlikely(q == NULL)) { atomic_dec(&user->sigpending); + free_uid(user); } else { INIT_LIST_HEAD(&q->list); q->flags = 0; - q->user = get_uid(user); + q->user = user; } - return(q); + + return q; } static void __sigqueue_free(struct sigqueue *q) @@ -315,17 +287,66 @@ void flush_sigqueue(struct sigpending *queue) /* * Flush all pending signals for a task. 
*/ +void __flush_signals(struct task_struct *t) +{ + clear_tsk_thread_flag(t, TIF_SIGPENDING); + flush_sigqueue(&t->pending); + flush_sigqueue(&t->signal->shared_pending); +} + void flush_signals(struct task_struct *t) { unsigned long flags; spin_lock_irqsave(&t->sighand->siglock, flags); - clear_tsk_thread_flag(t,TIF_SIGPENDING); - flush_sigqueue(&t->pending); - flush_sigqueue(&t->signal->shared_pending); + __flush_signals(t); spin_unlock_irqrestore(&t->sighand->siglock, flags); } +static void __flush_itimer_signals(struct sigpending *pending) +{ + sigset_t signal, retain; + struct sigqueue *q, *n; + + signal = pending->signal; + sigemptyset(&retain); + + list_for_each_entry_safe(q, n, &pending->list, list) { + int sig = q->info.si_signo; + + if (likely(q->info.si_code != SI_TIMER)) { + sigaddset(&retain, sig); + } else { + sigdelset(&signal, sig); + list_del_init(&q->list); + __sigqueue_free(q); + } + } + + sigorsets(&pending->signal, &signal, &retain); +} + +void flush_itimer_signals(void) +{ + struct task_struct *tsk = current; + unsigned long flags; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + __flush_itimer_signals(&tsk->pending); + __flush_itimer_signals(&tsk->signal->shared_pending); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); +} + +void ignore_signals(struct task_struct *t) +{ + int i; + + for (i = 0; i < _NSIG; ++i) + t->sighand->action[i].sa.sa_handler = SIG_IGN; + + flush_signals(t); +} + /* * Flush all handlers for a task. */ @@ -344,6 +365,16 @@ flush_signal_handlers(struct task_struct *t, int force_default) } } +int unhandled_signal(struct task_struct *tsk, int sig) +{ + void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; + if (is_global_init(tsk)) + return 1; + if (handler != SIG_IGN && handler != SIG_DFL) + return 0; + return !tracehook_consider_fatal_signal(tsk, sig); +} + /* Notify the system that a driver wants to block all signals for this * process, and wants to be notified if any signals at all were to be @@ -379,13 +410,9 @@ unblock_all_signals(void) spin_unlock_irqrestore(¤t->sighand->siglock, flags); } -static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) { struct sigqueue *q, *first = NULL; - int still_pending = 0; - - if (unlikely(!sigismember(&list->signal, sig))) - return 0; /* * Collect the siginfo appropriate to this signal. Check if @@ -393,33 +420,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) */ list_for_each_entry(q, &list->list, list) { if (q->info.si_signo == sig) { - if (first) { - still_pending = 1; - break; - } + if (first) + goto still_pending; first = q; } } + + sigdelset(&list->signal, sig); + if (first) { +still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); __sigqueue_free(first); - if (!still_pending) - sigdelset(&list->signal, sig); } else { - /* Ok, it wasn't in the queue. This must be a fast-pathed signal or we must have been out of queue space. So zero out the info. 
*/ - sigdelset(&list->signal, sig); info->si_signo = sig; info->si_errno = 0; - info->si_code = 0; + info->si_code = SI_USER; info->si_pid = 0; info->si_uid = 0; } - return 1; } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, @@ -437,8 +461,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, } } - if (!collect_signal(sig, pending, info)) - sig = 0; + collect_signal(sig, pending, info); } return sig; @@ -452,30 +475,60 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, */ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { - int signr = __dequeue_signal(&tsk->pending, mask, info); - if (!signr) + int signr; + + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ + signr = __dequeue_signal(&tsk->pending, mask, info); + if (!signr) { signr = __dequeue_signal(&tsk->signal->shared_pending, mask, info); - recalc_sigpending_tsk(tsk); - if (signr && unlikely(sig_kernel_stop(signr))) { - /* - * Set a marker that we have dequeued a stop signal. Our - * caller might release the siglock and then the pending - * stop signal it is about to process is no longer in the - * pending bitmasks, but must still be cleared by a SIGCONT - * (and overruled by a SIGKILL). So those cases clear this - * shared flag after we've set it. Note that this flag may - * remain set after the signal we return is ignored or - * handled. That doesn't matter because its only purpose - * is to alert stop-signal processing code when another - * processor has come along and cleared the flag. - */ - if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; - } - if ( signr && - ((info->si_code & __SI_MASK) == __SI_TIMER) && - info->si_sys_private){ + /* + * itimer signal ? + * + * itimers are process shared and we restart periodic + * itimers in the signal delivery path to prevent DoS + * attacks in the high resolution timer case. This is + * compliant with the old way of self restarting + * itimers, as the SIGALRM is a legacy signal and only + * queued once. Changing the restart behaviour to + * restart the timer in the signal dequeue path is + * reducing the timer noise on heavy loaded !highres + * systems too. + */ + if (unlikely(signr == SIGALRM)) { + struct hrtimer *tmr = &tsk->signal->real_timer; + + if (!hrtimer_is_queued(tmr) && + tsk->signal->it_real_incr.tv64 != 0) { + hrtimer_forward(tmr, tmr->base->get_time(), + tsk->signal->it_real_incr); + hrtimer_restart(tmr); + } + } + } + + recalc_sigpending(); + if (!signr) + return 0; + + if (unlikely(sig_kernel_stop(signr))) { + /* + * Set a marker that we have dequeued a stop signal. Our + * caller might release the siglock and then the pending + * stop signal it is about to process is no longer in the + * pending bitmasks, but must still be cleared by a SIGCONT + * (and overruled by a SIGKILL). So those cases clear this + * shared flag after we've set it. Note that this flag may + * remain set after the signal we return is ignored or + * handled. That doesn't matter because its only purpose + * is to alert stop-signal processing code when another + * processor has come along and cleared the flag. + */ + tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + } + if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { /* * Release the siglock to ensure proper locking order * of timer locks outside of siglocks. 
Note, we leave @@ -507,15 +560,15 @@ void signal_wake_up(struct task_struct *t, int resume) set_tsk_thread_flag(t, TIF_SIGPENDING); /* - * For SIGKILL, we want to wake it up in the stopped/traced case. - * We don't check t->state here because there is a race with it + * For SIGKILL, we want to wake it up in the stopped/traced/killable + * case. We don't check t->state here because there is a race with it * executing another processor and just now entering stopped state. * By using wake_up_state, we ensure the process will wake up and * handle its death signal. */ mask = TASK_INTERRUPTIBLE; if (resume) - mask |= TASK_STOPPED | TASK_TRACED; + mask |= TASK_WAKEKILL; if (!wake_up_state(t, mask)) kick_process(t); } @@ -571,90 +624,100 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s) return 1; } +static inline int is_si_special(const struct siginfo *info) +{ + return info <= SEND_SIG_FORCED; +} + +static inline bool si_fromuser(const struct siginfo *info) +{ + return info == SEND_SIG_NOINFO || + (!is_si_special(info) && SI_FROMUSER(info)); +} + /* * Bad permissions for sending the signal + * - the caller must hold at least the RCU read lock */ static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) { - int error = -EINVAL; + const struct cred *cred = current_cred(), *tcred; + struct pid *sid; + int error; + if (!valid_signal(sig)) - return error; - error = -EPERM; - if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) - && ((sig != SIGCONT) || - (current->signal->session != t->signal->session)) - && (current->euid ^ t->suid) && (current->euid ^ t->uid) - && (current->uid ^ t->suid) && (current->uid ^ t->uid) - && !capable(CAP_KILL)) + return -EINVAL; + + if (!si_fromuser(info)) + return 0; + + error = audit_signal_info(sig, t); /* Let audit system see the signal */ + if (error) return error; - error = security_task_kill(t, info, sig, 0); - if (!error) - audit_signal_info(sig, t); /* Let audit system see the signal */ - return error; -} + tcred = __task_cred(t); + if ((cred->euid ^ tcred->suid) && + (cred->euid ^ tcred->uid) && + (cred->uid ^ tcred->suid) && + (cred->uid ^ tcred->uid) && + !capable(CAP_KILL)) { + switch (sig) { + case SIGCONT: + sid = task_session(t); + /* + * We don't return the error if sid == NULL. The + * task was unhashed, the caller must notice this. + */ + if (!sid || sid == task_session(current)) + break; + default: + return -EPERM; + } + } -/* forward decl */ -static void do_notify_parent_cldstop(struct task_struct *tsk, int why); + return security_task_kill(t, info, sig, 0); +} /* - * Handle magic process-wide effects of stop/continue signals. - * Unlike the signal actions, these happen immediately at signal-generation + * Handle magic process-wide effects of stop/continue signals. Unlike + * the signal actions, these happen immediately at signal-generation * time regardless of blocking, ignoring, or handling. This does the * actual continuing for SIGCONT, but not the actual stopping for stop - * signals. The process stop is done as a signal action for SIG_DFL. + * signals. The process stop is done as a signal action for SIG_DFL. + * + * Returns true if the signal should be actually delivered, otherwise + * it should be dropped. 
*/ -static void handle_stop_signal(int sig, struct task_struct *p) +static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) { + struct signal_struct *signal = p->signal; struct task_struct *t; - if (p->signal->flags & SIGNAL_GROUP_EXIT) + if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { /* - * The process is in the middle of dying already. + * The process is in the middle of dying, nothing to do. */ - return; - - if (sig_kernel_stop(sig)) { + } else if (sig_kernel_stop(sig)) { /* * This is a stop signal. Remove SIGCONT from all queues. */ - rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending); + rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); t = p; do { rm_from_queue(sigmask(SIGCONT), &t->pending); - t = next_thread(t); - } while (t != p); + } while_each_thread(p, t); } else if (sig == SIGCONT) { + unsigned int why; /* * Remove all stop signals from all queues, * and wake all threads. */ - if (unlikely(p->signal->group_stop_count > 0)) { - /* - * There was a group stop in progress. We'll - * pretend it finished before we got here. We are - * obliged to report it to the parent: if the - * SIGSTOP happened "after" this SIGCONT, then it - * would have cleared this pending SIGCONT. If it - * happened "before" this SIGCONT, then the parent - * got the SIGCHLD about the stop finishing before - * the continue happened. We do the notification - * now, and it's as if the stop had finished and - * the SIGCHLD was pending on entry to this kill. - */ - p->signal->group_stop_count = 0; - p->signal->flags = SIGNAL_STOP_CONTINUED; - spin_unlock(&p->sighand->siglock); - do_notify_parent_cldstop(p, CLD_STOPPED); - spin_lock(&p->sighand->siglock); - } - rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); + rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); t = p; do { unsigned int state; rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); - /* * If there is a handler for SIGCONT, we must make * sure that no thread returns to user mode before @@ -664,133 +727,319 @@ static void handle_stop_signal(int sig, struct task_struct *p) * running the handler. With the TIF_SIGPENDING * flag set, the thread will pause and acquire the * siglock that we hold now and until we've queued - * the pending signal. + * the pending signal. * * Wake up the stopped thread _after_ setting * TIF_SIGPENDING */ - state = TASK_STOPPED; + state = __TASK_STOPPED; if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { set_tsk_thread_flag(t, TIF_SIGPENDING); state |= TASK_INTERRUPTIBLE; } wake_up_state(t, state); + } while_each_thread(p, t); - t = next_thread(t); - } while (t != p); + /* + * Notify the parent with CLD_CONTINUED if we were stopped. + * + * If we were in the middle of a group stop, we pretend it + * was already finished, and then continued. Since SIGCHLD + * doesn't queue we report only CLD_STOPPED, as if the next + * CLD_CONTINUED was dropped. + */ + why = 0; + if (signal->flags & SIGNAL_STOP_STOPPED) + why |= SIGNAL_CLD_CONTINUED; + else if (signal->group_stop_count) + why |= SIGNAL_CLD_STOPPED; - if (p->signal->flags & SIGNAL_STOP_STOPPED) { + if (why) { /* - * We were in fact stopped, and are now continued. - * Notify the parent with CLD_CONTINUED. + * The first thread which returns from do_signal_stop() + * will take ->siglock, notice SIGNAL_CLD_MASK, and + * notify its parent. See get_signal_to_deliver(). 
*/ - p->signal->flags = SIGNAL_STOP_CONTINUED; - p->signal->group_exit_code = 0; - spin_unlock(&p->sighand->siglock); - do_notify_parent_cldstop(p, CLD_CONTINUED); - spin_lock(&p->sighand->siglock); + signal->flags = why | SIGNAL_STOP_CONTINUED; + signal->group_stop_count = 0; + signal->group_exit_code = 0; } else { /* * We are not stopped, but there could be a stop * signal in the middle of being processed after * being removed from the queue. Clear that too. */ - p->signal->flags = 0; + signal->flags &= ~SIGNAL_STOP_DEQUEUED; } - } else if (sig == SIGKILL) { - /* - * Make sure that any pending stop signal already dequeued - * is undone by the wakeup for SIGKILL. - */ - p->signal->flags = 0; } + + return !sig_ignored(p, sig, from_ancestor_ns); } -static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - struct sigpending *signals) +/* + * Test if P wants to take SIG. After we've checked all threads with this, + * it's equivalent to finding no threads not blocking SIG. Any threads not + * blocking SIG were ruled out because they are not running and already + * have pending signals. Such threads will dequeue from the shared queue + * as soon as they're available, so putting the signal on the shared queue + * will be equivalent to sending it to one such thread. + */ +static inline int wants_signal(int sig, struct task_struct *p) +{ + if (sigismember(&p->blocked, sig)) + return 0; + if (p->flags & PF_EXITING) + return 0; + if (sig == SIGKILL) + return 1; + if (task_is_stopped_or_traced(p)) + return 0; + return task_curr(p) || !signal_pending(p); +} + +static void complete_signal(int sig, struct task_struct *p, int group) { - struct sigqueue * q = NULL; - int ret = 0; + struct signal_struct *signal = p->signal; + struct task_struct *t; /* - * fast-pathed signals for kernel-internal things like SIGSTOP - * or SIGKILL. + * Now find a thread we can wake up to take the signal off the queue. + * + * If the main thread wants the signal, it gets first crack. + * Probably the least surprising to the average bear. */ - if (info == SEND_SIG_FORCED) - goto out_set; - - /* Real-time signals must be queued if sent by sigqueue, or - some other real-time mechanism. It is implementation - defined whether kill() does so. We attempt to do so, on - the principle of least surprise, but since kill is not - allowed to fail with EAGAIN when low on memory we just - make sure at least one signal gets delivered and don't - pass on the info struct. */ - - q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - (is_si_special(info) || - info->si_code >= 0))); - if (q) { - list_add_tail(&q->list, &signals->list); - switch ((unsigned long) info) { - case (unsigned long) SEND_SIG_NOINFO: - q->info.si_signo = sig; - q->info.si_errno = 0; - q->info.si_code = SI_USER; - q->info.si_pid = current->pid; - q->info.si_uid = current->uid; - break; - case (unsigned long) SEND_SIG_PRIV: - q->info.si_signo = sig; - q->info.si_errno = 0; - q->info.si_code = SI_KERNEL; - q->info.si_pid = 0; - q->info.si_uid = 0; - break; - default: - copy_siginfo(&q->info, info); - break; - } - } else if (!is_si_special(info)) { - if (sig >= SIGRTMIN && info->si_code != SI_USER) + if (wants_signal(sig, p)) + t = p; + else if (!group || thread_group_empty(p)) /* - * Queue overflow, abort. We may abort if the signal was rt - * and sent by user using something other than kill(). + * There is just one thread and it does not need to be woken. + * It will dequeue unblocked signals before it runs again. 
*/ - return -EAGAIN; + return; + else { + /* + * Otherwise try to find a suitable thread. + */ + t = signal->curr_target; + while (!wants_signal(sig, t)) { + t = next_thread(t); + if (t == signal->curr_target) + /* + * No thread needs to be woken. + * Any eligible threads will see + * the signal in the queue soon. + */ + return; + } + signal->curr_target = t; } -out_set: - sigaddset(&signals->signal, sig); - return ret; -} + /* + * Found a killable thread. If the signal will be fatal, + * then start taking the whole group down immediately. + */ + if (sig_fatal(p, sig) && + !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && + !sigismember(&t->real_blocked, sig) && + (sig == SIGKILL || + !tracehook_consider_fatal_signal(t, sig))) { + /* + * This signal will be fatal to the whole group. + */ + if (!sig_kernel_coredump(sig)) { + /* + * Start a group exit and wake everybody up. + * This way we don't have other threads + * running and doing things after a slower + * thread has the fatal signal pending. + */ + signal->flags = SIGNAL_GROUP_EXIT; + signal->group_exit_code = sig; + signal->group_stop_count = 0; + t = p; + do { + sigaddset(&t->pending.signal, SIGKILL); + signal_wake_up(t, 1); + } while_each_thread(p, t); + return; + } + } -#define LEGACY_QUEUE(sigptr, sig) \ - (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) + /* + * The signal is already in the shared-pending queue. + * Tell the chosen thread to wake up and dequeue it. + */ + signal_wake_up(t, sig == SIGKILL); + return; +} +static inline int legacy_queue(struct sigpending *signals, int sig) +{ + return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); +} -static int -specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) +static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, + int group, int from_ancestor_ns) { - int ret = 0; + struct sigpending *pending; + struct sigqueue *q; + int override_rlimit; + + trace_signal_generate(sig, info, t); - BUG_ON(!irqs_disabled()); assert_spin_locked(&t->sighand->siglock); - /* Short-circuit ignored signals. */ - if (sig_ignored(t, sig)) - goto out; + if (!prepare_signal(sig, t, from_ancestor_ns)) + return 0; - /* Support queueing exactly one non-rt signal, so that we - can get more detailed information about the cause of - the signal. */ - if (LEGACY_QUEUE(&t->pending, sig)) - goto out; + pending = group ? &t->signal->shared_pending : &t->pending; + /* + * Short-circuit ignored signals and support queuing + * exactly one non-rt signal, so that we can get more + * detailed information about the cause of the signal. + */ + if (legacy_queue(pending, sig)) + return 0; + /* + * fast-pathed signals for kernel-internal things like SIGSTOP + * or SIGKILL. + */ + if (info == SEND_SIG_FORCED) + goto out_set; + + /* Real-time signals must be queued if sent by sigqueue, or + some other real-time mechanism. It is implementation + defined whether kill() does so. We attempt to do so, on + the principle of least surprise, but since kill is not + allowed to fail with EAGAIN when low on memory we just + make sure at least one signal gets delivered and don't + pass on the info struct. 
*/ + + if (sig < SIGRTMIN) + override_rlimit = (is_si_special(info) || info->si_code >= 0); + else + override_rlimit = 0; + + q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, + override_rlimit); + if (q) { + list_add_tail(&q->list, &pending->list); + switch ((unsigned long) info) { + case (unsigned long) SEND_SIG_NOINFO: + q->info.si_signo = sig; + q->info.si_errno = 0; + q->info.si_code = SI_USER; + q->info.si_pid = task_tgid_nr_ns(current, + task_active_pid_ns(t)); + q->info.si_uid = current_uid(); + break; + case (unsigned long) SEND_SIG_PRIV: + q->info.si_signo = sig; + q->info.si_errno = 0; + q->info.si_code = SI_KERNEL; + q->info.si_pid = 0; + q->info.si_uid = 0; + break; + default: + copy_siginfo(&q->info, info); + if (from_ancestor_ns) + q->info.si_pid = 0; + break; + } + } else if (!is_si_special(info)) { + if (sig >= SIGRTMIN && info->si_code != SI_USER) { + /* + * Queue overflow, abort. We may abort if the + * signal was rt and sent by user using something + * other than kill(). + */ + trace_signal_overflow_fail(sig, group, info); + return -EAGAIN; + } else { + /* + * This is a silent loss of information. We still + * send the signal, but the *info bits are lost. + */ + trace_signal_lose_info(sig, group, info); + } + } + +out_set: + signalfd_notify(t, sig); + sigaddset(&pending->signal, sig); + complete_signal(sig, t, group); + return 0; +} + +static int send_signal(int sig, struct siginfo *info, struct task_struct *t, + int group) +{ + int from_ancestor_ns = 0; + +#ifdef CONFIG_PID_NS + from_ancestor_ns = si_fromuser(info) && + !task_pid_nr_ns(current, task_active_pid_ns(t)); +#endif + + return __send_signal(sig, info, t, group, from_ancestor_ns); +} + +static void print_fatal_signal(struct pt_regs *regs, int signr) +{ + printk("%s/%d: potentially unexpected fatal signal %d.\n", + current->comm, task_pid_nr(current), signr); + +#if defined(__i386__) && !defined(__arch_um__) + printk("code at %08lx: ", regs->ip); + { + int i; + for (i = 0; i < 16; i++) { + unsigned char insn; + + if (get_user(insn, (unsigned char *)(regs->ip + i))) + break; + printk("%02x ", insn); + } + } +#endif + printk("\n"); + preempt_disable(); + show_regs(regs); + preempt_enable(); +} + +static int __init setup_print_fatal_signals(char *str) +{ + get_option (&str, &print_fatal_signals); + + return 1; +} + +__setup("print-fatal-signals=", setup_print_fatal_signals); + +int +__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +{ + return send_signal(sig, info, p, 1); +} + +static int +specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) +{ + return send_signal(sig, info, t, 0); +} + +int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, + bool group) +{ + unsigned long flags; + int ret = -ESRCH; + + if (lock_task_sighand(p, &flags)) { + ret = send_signal(sig, info, p, group); + unlock_task_sighand(p, &flags); + } - ret = send_signal(sig, info, t, &t->pending); - if (!ret && !sigismember(&t->blocked, sig)) - signal_wake_up(t, sig == SIGKILL); -out: return ret; } @@ -802,7 +1051,8 @@ out: * since we do not want to have a signal handler that was blocked * be invoked when user space had explicitly blocked it. * - * We don't want to have recursive SIGSEGV's etc, for example. + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. 
*/ int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) @@ -819,173 +1069,17 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) action->sa.sa_handler = SIG_DFL; if (blocked) { sigdelset(&t->blocked, sig); - recalc_sigpending_tsk(t); + recalc_sigpending_and_wake(t); } } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; ret = specific_send_sig_info(sig, info, t); spin_unlock_irqrestore(&t->sighand->siglock, flags); return ret; } -void -force_sig_specific(int sig, struct task_struct *t) -{ - force_sig_info(sig, SEND_SIG_FORCED, t); -} - -/* - * Test if P wants to take SIG. After we've checked all threads with this, - * it's equivalent to finding no threads not blocking SIG. Any threads not - * blocking SIG were ruled out because they are not running and already - * have pending signals. Such threads will dequeue from the shared queue - * as soon as they're available, so putting the signal on the shared queue - * will be equivalent to sending it to one such thread. - */ -static inline int wants_signal(int sig, struct task_struct *p) -{ - if (sigismember(&p->blocked, sig)) - return 0; - if (p->flags & PF_EXITING) - return 0; - if (sig == SIGKILL) - return 1; - if (p->state & (TASK_STOPPED | TASK_TRACED)) - return 0; - return task_curr(p) || !signal_pending(p); -} - -static void -__group_complete_signal(int sig, struct task_struct *p) -{ - struct task_struct *t; - - /* - * Now find a thread we can wake up to take the signal off the queue. - * - * If the main thread wants the signal, it gets first crack. - * Probably the least surprising to the average bear. - */ - if (wants_signal(sig, p)) - t = p; - else if (thread_group_empty(p)) - /* - * There is just one thread and it does not need to be woken. - * It will dequeue unblocked signals before it runs again. - */ - return; - else { - /* - * Otherwise try to find a suitable thread. - */ - t = p->signal->curr_target; - if (t == NULL) - /* restart balancing at this thread */ - t = p->signal->curr_target = p; - - while (!wants_signal(sig, t)) { - t = next_thread(t); - if (t == p->signal->curr_target) - /* - * No thread needs to be woken. - * Any eligible threads will see - * the signal in the queue soon. - */ - return; - } - p->signal->curr_target = t; - } - - /* - * Found a killable thread. If the signal will be fatal, - * then start taking the whole group down immediately. - */ - if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) && - !sigismember(&t->real_blocked, sig) && - (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { - /* - * This signal will be fatal to the whole group. - */ - if (!sig_kernel_coredump(sig)) { - /* - * Start a group exit and wake everybody up. - * This way we don't have other threads - * running and doing things after a slower - * thread has the fatal signal pending. - */ - p->signal->flags = SIGNAL_GROUP_EXIT; - p->signal->group_exit_code = sig; - p->signal->group_stop_count = 0; - t = p; - do { - sigaddset(&t->pending.signal, SIGKILL); - signal_wake_up(t, 1); - t = next_thread(t); - } while (t != p); - return; - } - - /* - * There will be a core dump. We make all threads other - * than the chosen one go into a group stop so that nothing - * happens until it gets scheduled, takes the signal off - * the shared queue, and does the core dump. This is a - * little more complicated than strictly necessary, but it - * keeps the signal state that winds up in the core dump - * unchanged from the death state, e.g. 
which thread had - * the core-dump signal unblocked. - */ - rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); - rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); - p->signal->group_stop_count = 0; - p->signal->group_exit_task = t; - t = p; - do { - p->signal->group_stop_count++; - signal_wake_up(t, 0); - t = next_thread(t); - } while (t != p); - wake_up_process(p->signal->group_exit_task); - return; - } - - /* - * The signal is already in the shared-pending queue. - * Tell the chosen thread to wake up and dequeue it. - */ - signal_wake_up(t, sig == SIGKILL); - return; -} - -int -__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) -{ - int ret = 0; - - assert_spin_locked(&p->sighand->siglock); - handle_stop_signal(sig, p); - - /* Short-circuit ignored signals. */ - if (sig_ignored(p, sig)) - return ret; - - if (LEGACY_QUEUE(&p->signal->shared_pending, sig)) - /* This is a non-RT signal and we already have one queued. */ - return ret; - - /* - * Put this signal on the shared-pending queue, or fail with EAGAIN. - * We always use the shared queue for process-wide signals, - * to avoid several races. - */ - ret = send_signal(sig, info, p, &p->signal->shared_pending); - if (unlikely(ret)) - return ret; - - __group_complete_signal(sig, p); - return 0; -} - /* * Nuke all other threads in the group. */ @@ -993,12 +1087,8 @@ void zap_other_threads(struct task_struct *p) { struct task_struct *t; - p->signal->flags = SIGNAL_GROUP_EXIT; p->signal->group_stop_count = 0; - if (thread_group_empty(p)) - return; - for (t = next_thread(p); t != p; t = next_thread(t)) { /* * Don't bother with already dead threads @@ -1006,30 +1096,17 @@ void zap_other_threads(struct task_struct *p) if (t->exit_state) continue; - /* - * We don't want to notify the parent, since we are - * killed as part of a thread group due to another - * thread doing an execve() or similar. So set the - * exit signal to -1 to allow immediate reaping of - * the process. But don't detach the thread group - * leader. - */ - if (t != p->group_leader) - t->exit_signal = -1; - /* SIGKILL will be handled before any pending SIGSTOP */ sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } } -/* - * Must be called under rcu_read_lock() or with tasklist_lock read-held. 
- */ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) { struct sighand_struct *sighand; + rcu_read_lock(); for (;;) { sighand = rcu_dereference(tsk->sighand); if (unlikely(sighand == NULL)) @@ -1040,33 +1117,30 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long break; spin_unlock_irqrestore(&sighand->siglock, *flags); } + rcu_read_unlock(); return sighand; } +/* + * send signal info to all the members of a group + * - the caller must hold the RCU read lock at least + */ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - unsigned long flags; - int ret; + int ret = check_kill_permission(sig, info, p); - ret = check_kill_permission(sig, info, p); - - if (!ret && sig) { - ret = -ESRCH; - if (lock_task_sighand(p, &flags)) { - ret = __group_send_sig_info(sig, info, p); - unlock_task_sighand(p, &flags); - } - } + if (!ret && sig) + ret = do_send_sig_info(sig, info, p, true); return ret; } /* - * kill_pgrp_info() sends a signal to a process group: this is what the tty + * __kill_pgrp_info() sends a signal to a process group: this is what the tty * control characters do (^C, ^Z etc) + * - the caller must hold at least a readlock on tasklist_lock */ - int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; @@ -1082,63 +1156,36 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) return success ? 0 : retval; } -int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) -{ - int retval; - - read_lock(&tasklist_lock); - retval = __kill_pgrp_info(sig, info, pgrp); - read_unlock(&tasklist_lock); - - return retval; -} - -int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) -{ - if (pgrp <= 0) - return -EINVAL; - - return __kill_pgrp_info(sig, info, find_pid(pgrp)); -} - -int -kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) -{ - int retval; - - read_lock(&tasklist_lock); - retval = __kill_pg_info(sig, info, pgrp); - read_unlock(&tasklist_lock); - - return retval; -} - int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) { - int error; - int acquired_tasklist_lock = 0; + int error = -ESRCH; struct task_struct *p; rcu_read_lock(); - if (unlikely(sig_needs_tasklist(sig))) { - read_lock(&tasklist_lock); - acquired_tasklist_lock = 1; - } +retry: p = pid_task(pid, PIDTYPE_PID); - error = -ESRCH; - if (p) + if (p) { error = group_send_sig_info(sig, info, p); - if (unlikely(acquired_tasklist_lock)) - read_unlock(&tasklist_lock); + if (unlikely(error == -ESRCH)) + /* + * The task was unhashed in between, try again. + * If it is dead, pid_task() will return NULL, + * if we race with de_thread() it will find the + * new leader. 
+ */ + goto retry; + } rcu_read_unlock(); + return error; } -static int kill_proc_info(int sig, struct siginfo *info, pid_t pid) +int +kill_proc_info(int sig, struct siginfo *info, pid_t pid) { int error; rcu_read_lock(); - error = kill_pid_info(sig, info, find_pid(pid)); + error = kill_pid_info(sig, info, find_vpid(pid)); rcu_read_unlock(); return error; } @@ -1149,33 +1196,38 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, { int ret = -EINVAL; struct task_struct *p; + const struct cred *pcred; + unsigned long flags; if (!valid_signal(sig)) return ret; - read_lock(&tasklist_lock); + rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (!p) { ret = -ESRCH; goto out_unlock; } - if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) - && (euid != p->suid) && (euid != p->uid) - && (uid != p->suid) && (uid != p->uid)) { + pcred = __task_cred(p); + if (si_fromuser(info) && + euid != pcred->suid && euid != pcred->uid && + uid != pcred->suid && uid != pcred->uid) { ret = -EPERM; goto out_unlock; } ret = security_task_kill(p, info, sig, secid); if (ret) goto out_unlock; - if (sig && p->sighand) { - unsigned long flags; - spin_lock_irqsave(&p->sighand->siglock, flags); - ret = __group_send_sig_info(sig, info, p); - spin_unlock_irqrestore(&p->sighand->siglock, flags); + + if (sig) { + if (lock_task_sighand(p, &flags)) { + ret = __send_signal(sig, info, p, 1, 0); + unlock_task_sighand(p, &flags); + } else + ret = -ESRCH; } out_unlock: - read_unlock(&tasklist_lock); + rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); @@ -1187,46 +1239,48 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); * is probably wrong. Should make it like BSD or SYSV. */ -static int kill_something_info(int sig, struct siginfo *info, int pid) +static int kill_something_info(int sig, struct siginfo *info, pid_t pid) { - if (!pid) { - return kill_pg_info(sig, info, process_group(current)); - } else if (pid == -1) { + int ret; + + if (pid > 0) { + rcu_read_lock(); + ret = kill_pid_info(sig, info, find_vpid(pid)); + rcu_read_unlock(); + return ret; + } + + read_lock(&tasklist_lock); + if (pid != -1) { + ret = __kill_pgrp_info(sig, info, + pid ? find_vpid(-pid) : task_pgrp(current)); + } else { int retval = 0, count = 0; struct task_struct * p; - read_lock(&tasklist_lock); for_each_process(p) { - if (p->pid > 1 && p->tgid != current->tgid) { + if (task_pid_vnr(p) > 1 && + !same_thread_group(p, current)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) retval = err; } } - read_unlock(&tasklist_lock); - return count ? retval : -ESRCH; - } else if (pid < 0) { - return kill_pg_info(sig, info, -pid); - } else { - return kill_proc_info(sig, info, pid); + ret = count ? retval : -ESRCH; } + read_unlock(&tasklist_lock); + + return ret; } /* * These are for backward compatibility with the rest of the kernel source. */ -/* - * These two are the most common entry points. They send a signal - * just to the specific thread. - */ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - int ret; - unsigned long flags; - /* * Make sure legacy kernel users don't send in bad values * (normal paths check this in check_kill_permission). 
@@ -1234,18 +1288,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p) if (!valid_signal(sig)) return -EINVAL; - /* - * We need the tasklist lock even for the specific - * thread case (when we don't need to follow the group - * lists) in order to avoid races with "p->sighand" - * going away or changing from under us. - */ - read_lock(&tasklist_lock); - spin_lock_irqsave(&p->sighand->siglock, flags); - ret = specific_send_sig_info(sig, info, p); - spin_unlock_irqrestore(&p->sighand->siglock, flags); - read_unlock(&tasklist_lock); - return ret; + return do_send_sig_info(sig, info, p, false); } #define __si_special(priv) \ @@ -1257,20 +1300,6 @@ send_sig(int sig, struct task_struct *p, int priv) return send_sig_info(sig, __si_special(priv), p); } -/* - * This is the entry point for "process-wide" signals. - * They will go to an appropriate thread in the thread group. - */ -int -send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p) -{ - int ret; - read_lock(&tasklist_lock); - ret = group_send_sig_info(sig, info, p); - read_unlock(&tasklist_lock); - return ret; -} - void force_sig(int sig, struct task_struct *p) { @@ -1298,201 +1327,151 @@ force_sigsegv(int sig, struct task_struct *p) int kill_pgrp(struct pid *pid, int sig, int priv) { - return kill_pgrp_info(sig, __si_special(priv), pid); -} -EXPORT_SYMBOL(kill_pgrp); + int ret; -int kill_pid(struct pid *pid, int sig, int priv) -{ - return kill_pid_info(sig, __si_special(priv), pid); -} -EXPORT_SYMBOL(kill_pid); + read_lock(&tasklist_lock); + ret = __kill_pgrp_info(sig, __si_special(priv), pid); + read_unlock(&tasklist_lock); -int -kill_pg(pid_t pgrp, int sig, int priv) -{ - return kill_pg_info(sig, __si_special(priv), pgrp); + return ret; } +EXPORT_SYMBOL(kill_pgrp); -int -kill_proc(pid_t pid, int sig, int priv) +int kill_pid(struct pid *pid, int sig, int priv) { - return kill_proc_info(sig, __si_special(priv), pid); + return kill_pid_info(sig, __si_special(priv), pid); } +EXPORT_SYMBOL(kill_pid); /* * These functions support sending signals using preallocated sigqueue * structures. This is needed "because realtime applications cannot * afford to lose notifications of asynchronous events, like timer - * expirations or I/O completions". In the case of Posix Timers + * expirations or I/O completions". In the case of Posix Timers * we allocate the sigqueue structure from the timer_create. If this * allocation fails we are able to report the failure to the application * with an EAGAIN error. */ - struct sigqueue *sigqueue_alloc(void) { - struct sigqueue *q; + struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); - if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) + if (q) q->flags |= SIGQUEUE_PREALLOC; - return(q); + + return q; } void sigqueue_free(struct sigqueue *q) { unsigned long flags; + spinlock_t *lock = ¤t->sighand->siglock; + BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); /* - * If the signal is still pending remove it from the - * pending queue. + * We must hold ->siglock while testing q->list + * to serialize with collect_signal() or with + * __exit_signal()->flush_sigqueue(). 
*/ - if (unlikely(!list_empty(&q->list))) { - spinlock_t *lock = ¤t->sighand->siglock; - read_lock(&tasklist_lock); - spin_lock_irqsave(lock, flags); - if (!list_empty(&q->list)) - list_del_init(&q->list); - spin_unlock_irqrestore(lock, flags); - read_unlock(&tasklist_lock); - } + spin_lock_irqsave(lock, flags); q->flags &= ~SIGQUEUE_PREALLOC; - __sigqueue_free(q); -} - -int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) -{ - unsigned long flags; - int ret = 0; - - BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); - /* - * The rcu based delayed sighand destroy makes it possible to - * run this without tasklist lock held. The task struct itself - * cannot go away as create_timer did get_task_struct(). - * - * We return -1, when the task is marked exiting, so - * posix_timer_event can redirect it to the group leader + * If it is queued it will be freed when dequeued, + * like the "regular" sigqueue. */ - rcu_read_lock(); - - if (!likely(lock_task_sighand(p, &flags))) { - ret = -1; - goto out_err; - } - - if (unlikely(!list_empty(&q->list))) { - /* - * If an SI_TIMER entry is already queue just increment - * the overrun count. - */ - BUG_ON(q->info.si_code != SI_TIMER); - q->info.si_overrun++; - goto out; - } - /* Short-circuit ignored signals. */ - if (sig_ignored(p, sig)) { - ret = 1; - goto out; - } - - list_add_tail(&q->list, &p->pending.list); - sigaddset(&p->pending.signal, sig); - if (!sigismember(&p->blocked, sig)) - signal_wake_up(p, sig == SIGKILL); - -out: - unlock_task_sighand(p, &flags); -out_err: - rcu_read_unlock(); + if (!list_empty(&q->list)) + q = NULL; + spin_unlock_irqrestore(lock, flags); - return ret; + if (q) + __sigqueue_free(q); } -int -send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) +int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) { + int sig = q->info.si_signo; + struct sigpending *pending; unsigned long flags; - int ret = 0; + int ret; BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); - read_lock(&tasklist_lock); - /* Since it_lock is held, p->sighand cannot be NULL. */ - spin_lock_irqsave(&p->sighand->siglock, flags); - handle_stop_signal(sig, p); + ret = -1; + if (!likely(lock_task_sighand(t, &flags))) + goto ret; - /* Short-circuit ignored signals. */ - if (sig_ignored(p, sig)) { - ret = 1; + ret = 1; /* the signal is ignored */ + if (!prepare_signal(sig, t, 0)) goto out; - } + ret = 0; if (unlikely(!list_empty(&q->list))) { /* * If an SI_TIMER entry is already queue just increment - * the overrun count. Other uses should not try to - * send the signal multiple times. + * the overrun count. */ BUG_ON(q->info.si_code != SI_TIMER); q->info.si_overrun++; goto out; - } - - /* - * Put this signal on the shared-pending queue. - * We always use the shared queue for process-wide signals, - * to avoid several races. - */ - list_add_tail(&q->list, &p->signal->shared_pending.list); - sigaddset(&p->signal->shared_pending.signal, sig); + } + q->info.si_overrun = 0; - __group_complete_signal(sig, p); + signalfd_notify(t, sig); + pending = group ? &t->signal->shared_pending : &t->pending; + list_add_tail(&q->list, &pending->list); + sigaddset(&pending->signal, sig); + complete_signal(sig, t, group); out: - spin_unlock_irqrestore(&p->sighand->siglock, flags); - read_unlock(&tasklist_lock); + unlock_task_sighand(t, &flags); +ret: return ret; } /* - * Wake up any threads in the parent blocked in wait* syscalls. 
- */ -static inline void __wake_up_parent(struct task_struct *p, - struct task_struct *parent) -{ - wake_up_interruptible_sync(&parent->signal->wait_chldexit); -} - -/* * Let a parent know about the death of a child. * For a stopped/continued status change, use do_notify_parent_cldstop instead. + * + * Returns -1 if our parent ignored us and so we've switched to + * self-reaping, or else @sig. */ - -void do_notify_parent(struct task_struct *tsk, int sig) +int do_notify_parent(struct task_struct *tsk, int sig) { struct siginfo info; unsigned long flags; struct sighand_struct *psig; + int ret = sig; BUG_ON(sig == -1); /* do_notify_parent_cldstop should have been called instead. */ - BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED)); + BUG_ON(task_is_stopped_or_traced(tsk)); - BUG_ON(!tsk->ptrace && + BUG_ON(!task_ptrace(tsk) && (tsk->group_leader != tsk || !thread_group_empty(tsk))); info.si_signo = sig; info.si_errno = 0; - info.si_pid = tsk->pid; - info.si_uid = tsk->uid; + /* + * we are under tasklist_lock here so our parent is tied to + * us and cannot exit and release its namespace. + * + * the only it can is to switch its nsproxy with sys_unshare, + * bu uncharing pid namespaces is not allowed, so we'll always + * see relevant namespace + * + * write_lock() currently calls preempt_disable() which is the + * same as rcu_read_lock(), but according to Oleg, this is not + * correct to rely on this + */ + rcu_read_lock(); + info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_uid = __task_cred(tsk)->uid; + rcu_read_unlock(); - /* FIXME: find out whether or not this is supposed to be c*time. */ - info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime, - tsk->signal->utime)); - info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, - tsk->signal->stime)); + info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, + tsk->signal->utime)); + info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, + tsk->signal->stime)); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) @@ -1506,7 +1485,7 @@ void do_notify_parent(struct task_struct *tsk, int sig) psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); - if (!tsk->ptrace && sig == SIGCHLD && + if (!task_ptrace(tsk) && sig == SIGCHLD && (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* @@ -1524,14 +1503,16 @@ void do_notify_parent(struct task_struct *tsk, int sig) * is implementation-defined: we do (if you don't want * it, just use SIG_IGN instead). 
 	 */
-	tsk->exit_signal = -1;
+	ret = tsk->exit_signal = -1;
 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-			sig = 0;
+			sig = -1;
 	}
 	if (valid_signal(sig) && sig > 0)
 		__group_send_sig_info(sig, &info, tsk->parent);
 	__wake_up_parent(tsk, tsk->parent);
 	spin_unlock_irqrestore(&psig->siglock, flags);
+
+	return ret;
 }
 
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1541,7 +1522,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
-	if (tsk->ptrace & PT_PTRACED)
+	if (task_ptrace(tsk))
 		parent = tsk->parent;
 	else {
 		tsk = tsk->group_leader;
@@ -1550,12 +1531,16 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 
 	info.si_signo = SIGCHLD;
 	info.si_errno = 0;
-	info.si_pid = tsk->pid;
-	info.si_uid = tsk->uid;
+	/*
+	 * see comment in do_notify_parent() about the following 3 lines
+	 */
+	rcu_read_lock();
+	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
+	info.si_uid = __task_cred(tsk)->uid;
+	rcu_read_unlock();
 
-	/* FIXME: find out whether or not this is supposed to be c*time. */
-	info.si_utime = cputime_to_jiffies(tsk->utime);
-	info.si_stime = cputime_to_jiffies(tsk->stime);
+	info.si_utime = cputime_to_clock_t(tsk->utime);
+	info.si_stime = cputime_to_clock_t(tsk->stime);
 
 	info.si_code = why;
 	switch (why) {
@@ -1586,27 +1571,18 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 
 static inline int may_ptrace_stop(void)
 {
-	if (!likely(current->ptrace & PT_PTRACED))
-		return 0;
-
-	if (unlikely(current->parent == current->real_parent &&
-		    (current->ptrace & PT_ATTACHED)))
-		return 0;
-
-	if (unlikely(current->signal == current->parent->signal) &&
-	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+	if (!likely(task_ptrace(current)))
 		return 0;
-
 	/*
 	 * Are we in the middle of do_coredump?
 	 * If so and our tracer is also part of the coredump stopping
 	 * is a deadlock situation, and pointless because our tracer
 	 * is dead so don't allow us to stop.
 	 * If SIGKILL was already sent before the caller unlocked
-	 * ->siglock we must see ->core_waiters != 0. Otherwise it
+	 * ->siglock we must see ->core_state != NULL. Otherwise it
 	 * is safe to enter schedule().
 	 */
-	if (unlikely(current->mm->core_waiters) &&
+	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
 		return 0;
 
@@ -1614,6 +1590,16 @@ static inline int may_ptrace_stop(void)
 }
 
 /*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+	return	sigismember(&tsk->pending.signal, SIGKILL) ||
+		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
+}
+
+/*
  * This must be called with current->sighand->siglock held.
  *
  * This should be the path for all ptrace stops.
@@ -1621,11 +1607,30 @@ static inline int may_ptrace_stop(void)
  * That makes it a way to test a stopped process for
  * being ptrace-stopped vs being job-control-stopped.
  *
- * If we actually decide not to stop at all because the tracer is gone,
- * we leave nostop_code in current->exit_code.
+ * If we actually decide not to stop at all because the tracer
+ * is gone, we keep current->exit_code unless clear_code.
  */
-static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
+	if (arch_ptrace_stop_needed(exit_code, info)) {
+		/*
+		 * The arch code has something special to do before a
+		 * ptrace stop.  This is allowed to block, e.g. for faults
+		 * on user stack pages.  We can't keep the siglock while
+		 * calling arch_ptrace_stop, so we must release it now.
+		 * To preserve proper semantics, we must do this before
+		 * any signal bookkeeping like checking group_stop_count.
+		 * Meanwhile, a SIGKILL could come in before we retake the
+		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
+		 * So after regaining the lock, we must check for SIGKILL.
+		 */
+		spin_unlock_irq(&current->sighand->siglock);
+		arch_ptrace_stop(exit_code, info);
+		spin_lock_irq(&current->sighand->siglock);
+		if (sigkill_pending(current))
+			return;
+	}
+
 	/*
 	 * If there is a group stop in progress,
 	 * we must participate in the bookkeeping.
@@ -1637,25 +1642,40 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	current->exit_code = exit_code;
 
 	/* Let the debugger run.  */
-	set_current_state(TASK_TRACED);
+	__set_current_state(TASK_TRACED);
 	spin_unlock_irq(&current->sighand->siglock);
-	try_to_freeze();
 	read_lock(&tasklist_lock);
 	if (may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
+		/*
+		 * Don't want to allow preemption here, because
+		 * sys_ptrace() needs this task to be inactive.
+		 *
+		 * XXX: implement read_unlock_no_resched().
+		 */
+		preempt_disable();
 		read_unlock(&tasklist_lock);
+		preempt_enable_no_resched();
 		schedule();
 	} else {
 		/*
 		 * By the time we got the lock, our tracer went away.
-		 * Don't stop here.
+		 * Don't drop the lock yet, another tracer may come.
 		 */
+		__set_current_state(TASK_RUNNING);
+		if (clear_code)
+			current->exit_code = 0;
 		read_unlock(&tasklist_lock);
-		set_current_state(TASK_RUNNING);
-		current->exit_code = nostop_code;
 	}
 
 	/*
+	 * While in TASK_TRACED, we were considered "frozen enough".
+	 * Now that we woke up, it's crucial if we're supposed to be
+	 * frozen that we freeze now before running anything substantial.
	 */
+	try_to_freeze();
+
+	/*
 	 * We are back.  Now reacquire the siglock before touching
 	 * last_siginfo, so that we are sure to have synchronized with
 	 * any signal-sending on another CPU that wants to examine it.
@@ -1666,8 +1686,9 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	/*
 	 * Queued signals ignored us while we were stopped for tracing.
 	 * So check for any that we should take before resuming user mode.
+	 * This sets TIF_SIGPENDING, but never clears it.
 	 */
-	recalc_sigpending();
+	recalc_sigpending_tsk(current);
 }
 
 void ptrace_notify(int exit_code)
@@ -1679,36 +1700,15 @@ void ptrace_notify(int exit_code)
 	memset(&info, 0, sizeof info);
 	info.si_signo = SIGTRAP;
 	info.si_code = exit_code;
-	info.si_pid = current->pid;
-	info.si_uid = current->uid;
+	info.si_pid = task_pid_vnr(current);
+	info.si_uid = current_uid();
 
 	/* Let the debugger run.  */
 	spin_lock_irq(&current->sighand->siglock);
-	ptrace_stop(exit_code, 0, &info);
+	ptrace_stop(exit_code, 1, &info);
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
-static void
-finish_stop(int stop_count)
-{
-	/*
-	 * If there are no other threads in the group, or if there is
-	 * a group stop in progress and we are the last to stop,
-	 * report to the parent.  When ptraced, every thread reports itself.
-	 */
-	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
-
-	schedule();
-	/*
-	 * Now we don't run again until continued.
-	 */
-	current->exit_code = 0;
-}
-
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
@@ -1718,147 +1718,183 @@ finish_stop(int stop_count) static int do_signal_stop(int signr) { struct signal_struct *sig = current->signal; - int stop_count; + int notify; - if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) - return 0; + if (!sig->group_stop_count) { + struct task_struct *t; - if (sig->group_stop_count > 0) { - /* - * There is a group stop in progress. We don't need to - * start another one. - */ - stop_count = --sig->group_stop_count; - } else { + if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || + unlikely(signal_group_exit(sig))) + return 0; /* * There is no group stop already in progress. * We must initiate one now. */ - struct task_struct *t; - sig->group_exit_code = signr; - stop_count = 0; + sig->group_stop_count = 1; for (t = next_thread(current); t != current; t = next_thread(t)) /* * Setting state to TASK_STOPPED for a group * stop is always done with the siglock held, * so this check has no races. */ - if (!t->exit_state && - !(t->state & (TASK_STOPPED|TASK_TRACED))) { - stop_count++; + if (!(t->flags & PF_EXITING) && + !task_is_stopped_or_traced(t)) { + sig->group_stop_count++; signal_wake_up(t, 0); } - sig->group_stop_count = stop_count; } + /* + * If there are no other threads in the group, or if there is + * a group stop in progress and we are the last to stop, report + * to the parent. When ptraced, every thread reports itself. + */ + notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; + notify = tracehook_notify_jctl(notify, CLD_STOPPED); + /* + * tracehook_notify_jctl() can drop and reacquire siglock, so + * we keep ->group_stop_count != 0 before the call. If SIGCONT + * or SIGKILL comes in between ->group_stop_count == 0. + */ + if (sig->group_stop_count) { + if (!--sig->group_stop_count) + sig->flags = SIGNAL_STOP_STOPPED; + current->exit_code = sig->group_exit_code; + __set_current_state(TASK_STOPPED); + } + spin_unlock_irq(¤t->sighand->siglock); + + if (notify) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current, notify); + read_unlock(&tasklist_lock); + } + + /* Now we don't run again until woken by SIGCONT or SIGKILL */ + do { + schedule(); + } while (try_to_freeze()); - if (stop_count == 0) - sig->flags = SIGNAL_STOP_STOPPED; - current->exit_code = sig->group_exit_code; - __set_current_state(TASK_STOPPED); + tracehook_finish_jctl(); + current->exit_code = 0; - spin_unlock_irq(¤t->sighand->siglock); - finish_stop(stop_count); return 1; } -/* - * Do appropriate magic when group_stop_count > 0. - * We return nonzero if we stopped, after releasing the siglock. - * We return zero if we still hold the siglock and should look - * for another signal without checking group_stop_count again. - */ -static int handle_group_stop(void) +static int ptrace_signal(int signr, siginfo_t *info, + struct pt_regs *regs, void *cookie) { - int stop_count; + if (!task_ptrace(current)) + return signr; - if (current->signal->group_exit_task == current) { - /* - * Group stop is so we can do a core dump, - * We are the initiating thread, so get on with it. - */ - current->signal->group_exit_task = NULL; - return 0; + ptrace_signal_deliver(regs, cookie); + + /* Let the debugger run. */ + ptrace_stop(signr, 0, info); + + /* We're back. Did the debugger cancel the sig? */ + signr = current->exit_code; + if (signr == 0) + return signr; + + current->exit_code = 0; + + /* Update the siginfo structure if the signal has + changed. If the debugger wanted something + specific in the siginfo structure then it should + have updated *info via PTRACE_SETSIGINFO. 
*/ + if (signr != info->si_signo) { + info->si_signo = signr; + info->si_errno = 0; + info->si_code = SI_USER; + info->si_pid = task_pid_vnr(current->parent); + info->si_uid = task_uid(current->parent); } - if (current->signal->flags & SIGNAL_GROUP_EXIT) - /* - * Group stop is so another thread can do a core dump, - * or else we are racing against a death signal. - * Just punt the stop so we can get the next signal. - */ - return 0; + /* If the (new) signal is now blocked, requeue it. */ + if (sigismember(¤t->blocked, signr)) { + specific_send_sig_info(signr, info, current); + signr = 0; + } - /* - * There is a group stop in progress. We stop - * without any associated signal being in our queue. - */ - stop_count = --current->signal->group_stop_count; - if (stop_count == 0) - current->signal->flags = SIGNAL_STOP_STOPPED; - current->exit_code = current->signal->group_exit_code; - set_current_state(TASK_STOPPED); - spin_unlock_irq(¤t->sighand->siglock); - finish_stop(stop_count); - return 1; + return signr; } int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie) { - sigset_t *mask = ¤t->blocked; - int signr = 0; + struct sighand_struct *sighand = current->sighand; + struct signal_struct *signal = current->signal; + int signr; +relock: + /* + * We'll jump back here after any time we were stopped in TASK_STOPPED. + * While in TASK_STOPPED, we were considered "frozen enough". + * Now that we woke up, it's crucial if we're supposed to be + * frozen that we freeze now before running anything substantial. + */ try_to_freeze(); -relock: - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irq(&sighand->siglock); + /* + * Every stopped thread goes here after wakeup. Check to see if + * we should notify the parent, prepare_signal(SIGCONT) encodes + * the CLD_ si_code into SIGNAL_CLD_MASK bits. + */ + if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { + int why = (signal->flags & SIGNAL_STOP_CONTINUED) + ? CLD_CONTINUED : CLD_STOPPED; + signal->flags &= ~SIGNAL_CLD_MASK; + + why = tracehook_notify_jctl(why, CLD_CONTINUED); + spin_unlock_irq(&sighand->siglock); + + if (why) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current->group_leader, why); + read_unlock(&tasklist_lock); + } + goto relock; + } + for (;;) { struct k_sigaction *ka; - - if (unlikely(current->signal->group_stop_count > 0) && - handle_group_stop()) + /* + * Tracing can induce an artifical signal and choose sigaction. + * The return value in @signr determines the default action, + * but @info->si_signo is the signal number we will report. + */ + signr = tracehook_get_signal(current, regs, info, return_ka); + if (unlikely(signr < 0)) goto relock; + if (unlikely(signr != 0)) + ka = return_ka; + else { + if (unlikely(signal->group_stop_count > 0) && + do_signal_stop(0)) + goto relock; - signr = dequeue_signal(current, mask, info); - - if (!signr) - break; /* will return 0 */ - - if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { - ptrace_signal_deliver(regs, cookie); - - /* Let the debugger run. */ - ptrace_stop(signr, signr, info); - - /* We're back. Did the debugger cancel the sig? */ - signr = current->exit_code; - if (signr == 0) - continue; + signr = dequeue_signal(current, ¤t->blocked, + info); - current->exit_code = 0; + if (!signr) + break; /* will return 0 */ - /* Update the siginfo structure if the signal has - changed. If the debugger wanted something - specific in the siginfo structure then it should - have updated *info via PTRACE_SETSIGINFO. 
*/ - if (signr != info->si_signo) { - info->si_signo = signr; - info->si_errno = 0; - info->si_code = SI_USER; - info->si_pid = current->parent->pid; - info->si_uid = current->parent->uid; + if (signr != SIGKILL) { + signr = ptrace_signal(signr, info, + regs, cookie); + if (!signr) + continue; } - /* If the (new) signal is now blocked, requeue it. */ - if (sigismember(¤t->blocked, signr)) { - specific_send_sig_info(signr, info, current); - continue; - } + ka = &sighand->action[signr-1]; } - ka = ¤t->sighand->action[signr-1]; + /* Trace actually delivered signals. */ + trace_signal_deliver(signr, info, ka); + if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) { @@ -1877,8 +1913,18 @@ relock: if (sig_kernel_ignore(signr)) /* Default is nothing. */ continue; - /* Init gets no signals it doesn't want. */ - if (current == child_reaper) + /* + * Global init gets no signals it doesn't want. + * Container-init gets no signals it doesn't want from same + * container. + * + * Note that if global/container-init sees a sig_kernel_only() + * signal here, the signal must have been generated internally + * or must have come from an ancestor namespace. In either + * case, the signal cannot be dropped. + */ + if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && + !sig_kernel_only(signr)) continue; if (sig_kernel_stop(signr)) { @@ -1893,17 +1939,17 @@ relock: * We need to check for that and bail out if necessary. */ if (signr != SIGSTOP) { - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); /* signals can be posted during this window */ - if (is_orphaned_pgrp(process_group(current))) + if (is_current_pgrp_orphaned()) goto relock; - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irq(&sighand->siglock); } - if (likely(do_signal_stop(signr))) { + if (likely(do_signal_stop(info->si_signo))) { /* It released the siglock. */ goto relock; } @@ -1915,13 +1961,16 @@ relock: continue; } - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); /* * Anything else is fatal, maybe with a core dump. */ current->flags |= PF_SIGNALED; + if (sig_kernel_coredump(signr)) { + if (print_fatal_signals) + print_fatal_signal(regs, info->si_signo); /* * If it was able to dump core, this kills all * other threads in the group and synchronizes with @@ -1930,26 +1979,65 @@ relock: * first and our do_group_exit call below will use * that value and ignore the one we pass it. */ - do_coredump((long)signr, signr, regs); + do_coredump(info->si_signo, info->si_signo, regs); } /* * Death signals, no core dump. */ - do_group_exit(signr); + do_group_exit(info->si_signo); /* NOTREACHED */ } - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); return signr; } +void exit_signals(struct task_struct *tsk) +{ + int group_stop = 0; + struct task_struct *t; + + if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { + tsk->flags |= PF_EXITING; + return; + } + + spin_lock_irq(&tsk->sighand->siglock); + /* + * From now this task is not visible for group-wide signals, + * see wants_signal(), do_signal_stop(). + */ + tsk->flags |= PF_EXITING; + if (!signal_pending(tsk)) + goto out; + + /* It could be that __group_complete_signal() choose us to + * notify about group-wide signal. Another thread should be + * woken now to take the signal since we will not. 
+	 */
+	for (t = tsk; (t = next_thread(t)) != tsk; )
+		if (!signal_pending(t) && !(t->flags & PF_EXITING))
+			recalc_sigpending_and_wake(t);
+
+	if (unlikely(tsk->signal->group_stop_count) &&
+			!--tsk->signal->group_stop_count) {
+		tsk->signal->flags = SIGNAL_STOP_STOPPED;
+		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
+	}
+out:
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	if (unlikely(group_stop)) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(tsk, group_stop);
+		read_unlock(&tasklist_lock);
+	}
+}
+
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(kill_pg);
-EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(ptrace_notify);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
 EXPORT_SYMBOL(sigprocmask);
@@ -1961,7 +2049,7 @@ EXPORT_SYMBOL(unblock_all_signals);
  * System call entry points.
  */
 
-asmlinkage long sys_restart_syscall(void)
+SYSCALL_DEFINE0(restart_syscall)
 {
 	struct restart_block *restart = &current_thread_info()->restart_block;
 	return restart->fn(restart);
 }
@@ -2014,8 +2102,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 	return error;
 }
 
-asmlinkage long
-sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
+		sigset_t __user *, oset, size_t, sigsetsize)
 {
 	int error = -EINVAL;
 	sigset_t old_set, new_set;
@@ -2074,8 +2162,7 @@ out:
 	return error;
 }
 
-asmlinkage long
-sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
 {
 	return do_sigpending(set, sigsetsize);
 }
@@ -2094,6 +2181,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
 	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
+	 * Please remember to update the signalfd_copyinfo() function
+	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
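[Editor's note, not part of the patch: the two hunks above only change how sys_rt_sigprocmask() and sys_rt_sigpending() are declared (SYSCALL_DEFINEx wrappers instead of bare asmlinkage prototypes); their user-visible behaviour is unchanged. As a reminder of what that behaviour is, here is a minimal, hypothetical userspace sketch (the file name and program are illustrative only) that exercises the same syscalls through the glibc sigprocmask()/sigpending() wrappers: block a signal, let it become pending, and read it back.]

/* sigmask_demo.c - hypothetical test program (not part of this patch).
 * Exercises rt_sigprocmask()/rt_sigpending() via the glibc wrappers. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);

	/* SIG_BLOCK adds SIGUSR1 to the caller's blocked set. */
	if (sigprocmask(SIG_BLOCK, &block, NULL) != 0) {
		perror("sigprocmask");
		return 1;
	}

	raise(SIGUSR1);		/* delivery is deferred; the signal stays pending */

	if (sigpending(&pending) != 0) {
		perror("sigpending");
		return 1;
	}
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));

	/* Setting the disposition to SIG_IGN discards the pending signal,
	 * so unblocking afterwards does not terminate the process. */
	signal(SIGUSR1, SIG_IGN);
	sigprocmask(SIG_UNBLOCK, &block, NULL);
	return 0;
}

[Discarding the pending signal when the disposition becomes SIG_IGN is the same rm_from_queue_full() path visible later in do_sigaction() in this diff.]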
@@ -2144,11 +2233,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) #endif -asmlinkage long -sys_rt_sigtimedwait(const sigset_t __user *uthese, - siginfo_t __user *uinfo, - const struct timespec __user *uts, - size_t sigsetsize) +SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, + siginfo_t __user *, uinfo, const struct timespec __user *, uts, + size_t, sigsetsize) { int ret, sig; sigset_t these; @@ -2221,64 +2308,73 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, return ret; } -asmlinkage long -sys_kill(int pid, int sig) +SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct siginfo info; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; - info.si_pid = current->tgid; - info.si_uid = current->uid; + info.si_pid = task_tgid_vnr(current); + info.si_uid = current_uid(); return kill_something_info(sig, &info, pid); } -static int do_tkill(int tgid, int pid, int sig) +static int +do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) { - int error; - struct siginfo info; struct task_struct *p; + int error = -ESRCH; - error = -ESRCH; - info.si_signo = sig; - info.si_errno = 0; - info.si_code = SI_TKILL; - info.si_pid = current->tgid; - info.si_uid = current->uid; - - read_lock(&tasklist_lock); - p = find_task_by_pid(pid); - if (p && (tgid <= 0 || p->tgid == tgid)) { - error = check_kill_permission(sig, &info, p); + rcu_read_lock(); + p = find_task_by_vpid(pid); + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { + error = check_kill_permission(sig, info, p); /* * The null signal is a permissions and process existence * probe. No signal is actually delivered. */ - if (!error && sig && p->sighand) { - spin_lock_irq(&p->sighand->siglock); - handle_stop_signal(sig, p); - error = specific_send_sig_info(sig, &info, p); - spin_unlock_irq(&p->sighand->siglock); + if (!error && sig) { + error = do_send_sig_info(sig, info, p, false); + /* + * If lock_task_sighand() failed we pretend the task + * dies after receiving the signal. The window is tiny, + * and the signal is private anyway. + */ + if (unlikely(error == -ESRCH)) + error = 0; } } - read_unlock(&tasklist_lock); + rcu_read_unlock(); return error; } +static int do_tkill(pid_t tgid, pid_t pid, int sig) +{ + struct siginfo info; + + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_TKILL; + info.si_pid = task_tgid_vnr(current); + info.si_uid = current_uid(); + + return do_send_specific(tgid, pid, sig, &info); +} + /** * sys_tgkill - send signal to one specific thread * @tgid: the thread group ID of the thread * @pid: the PID of the thread * @sig: signal to be sent * - * This syscall also checks the tgid and returns -ESRCH even if the PID + * This syscall also checks the @tgid and returns -ESRCH even if the PID * exists but it's not belonging to the target process anymore. This * method solves the problem of threads exiting and PIDs getting reused. */ -asmlinkage long sys_tgkill(int tgid, int pid, int sig) +SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) @@ -2290,8 +2386,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig) /* * Send a signal to only one task, even if it's a CLONE_THREAD task. 
*/ -asmlinkage long -sys_tkill(int pid, int sig) +SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0) @@ -2300,8 +2395,8 @@ sys_tkill(int pid, int sig) return do_tkill(0, pid, sig); } -asmlinkage long -sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) +SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, + siginfo_t __user *, uinfo) { siginfo_t info; @@ -2318,26 +2413,44 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) return kill_proc_info(sig, &info, pid); } +long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) +{ + /* This is only valid for single tasks */ + if (pid <= 0 || tgid <= 0) + return -EINVAL; + + /* Not even root can pretend to send signals from the kernel. + Nor can they impersonate a kill(), which adds source info. */ + if (info->si_code >= 0) + return -EPERM; + info->si_signo = sig; + + return do_send_specific(tgid, pid, sig, info); +} + +SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, + siginfo_t __user *, uinfo) +{ + siginfo_t info; + + if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) + return -EFAULT; + + return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); +} + int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) { + struct task_struct *t = current; struct k_sigaction *k; sigset_t mask; if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) return -EINVAL; - k = ¤t->sighand->action[sig-1]; + k = &t->sighand->action[sig-1]; spin_lock_irq(¤t->sighand->siglock); - if (signal_pending(current)) { - /* - * If there might be a fatal signal pending on multiple - * threads, make sure we take it before changing the action. - */ - spin_unlock_irq(¤t->sighand->siglock); - return -ERESTARTNOINTR; - } - if (oact) *oact = *k; @@ -2356,15 +2469,12 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) * (for example, SIGCHLD), shall cause the pending signal to * be discarded, whether or not it is blocked" */ - if (act->sa.sa_handler == SIG_IGN || - (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) { - struct task_struct *t = current; + if (sig_handler_ignored(sig_handler(t, sig), sig)) { sigemptyset(&mask); sigaddset(&mask, sig); rm_from_queue_full(&mask, &t->signal->shared_pending); do { rm_from_queue_full(&mask, &t->pending); - recalc_sigpending_tsk(t); t = next_thread(t); } while (t != current); } @@ -2380,11 +2490,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s stack_t oss; int error; - if (uoss) { - oss.ss_sp = (void __user *) current->sas_ss_sp; - oss.ss_size = current->sas_ss_size; - oss.ss_flags = sas_ss_flags(sp); - } + oss.ss_sp = (void __user *) current->sas_ss_sp; + oss.ss_size = current->sas_ss_size; + oss.ss_flags = sas_ss_flags(sp); if (uss) { void __user *ss_sp; @@ -2392,10 +2500,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s int ss_flags; error = -EFAULT; - if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) - || __get_user(ss_sp, &uss->ss_sp) - || __get_user(ss_flags, &uss->ss_flags) - || __get_user(ss_size, &uss->ss_size)) + if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) + goto out; + error = __get_user(ss_sp, &uss->ss_sp) | + __get_user(ss_flags, &uss->ss_flags) | + __get_user(ss_size, &uss->ss_size); + if (error) goto out; error = -EPERM; @@ -2427,21 +2537,23 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s current->sas_ss_size = ss_size; } + error = 0; if 
(uoss) { error = -EFAULT; - if (copy_to_user(uoss, &oss, sizeof(oss))) + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) goto out; + error = __put_user(oss.ss_sp, &uoss->ss_sp) | + __put_user(oss.ss_size, &uoss->ss_size) | + __put_user(oss.ss_flags, &uoss->ss_flags); } - error = 0; out: return error; } #ifdef __ARCH_WANT_SYS_SIGPENDING -asmlinkage long -sys_sigpending(old_sigset_t __user *set) +SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) { return do_sigpending(set, sizeof(*set)); } @@ -2452,8 +2564,8 @@ sys_sigpending(old_sigset_t __user *set) /* Some platforms have their own version with special arguments others support only sys_rt_sigprocmask. */ -asmlinkage long -sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) +SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, + old_sigset_t __user *, oset) { int error; old_sigset_t old_set, new_set; @@ -2503,11 +2615,10 @@ out: #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ #ifdef __ARCH_WANT_SYS_RT_SIGACTION -asmlinkage long -sys_rt_sigaction(int sig, - const struct sigaction __user *act, - struct sigaction __user *oact, - size_t sigsetsize) +SYSCALL_DEFINE4(rt_sigaction, int, sig, + const struct sigaction __user *, act, + struct sigaction __user *, oact, + size_t, sigsetsize) { struct k_sigaction new_sa, old_sa; int ret = -EINVAL; @@ -2537,15 +2648,13 @@ out: /* * For backwards compatibility. Functionality superseded by sigprocmask. */ -asmlinkage long -sys_sgetmask(void) +SYSCALL_DEFINE0(sgetmask) { /* SMP safe */ return current->blocked.sig[0]; } -asmlinkage long -sys_ssetmask(int newmask) +SYSCALL_DEFINE1(ssetmask, int, newmask) { int old; @@ -2565,8 +2674,7 @@ sys_ssetmask(int newmask) /* * For backwards compatibility. Functionality superseded by sigaction. */ -asmlinkage unsigned long -sys_signal(int sig, __sighandler_t handler) +SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) { struct k_sigaction new_sa, old_sa; int ret; @@ -2583,8 +2691,7 @@ sys_signal(int sig, __sighandler_t handler) #ifdef __ARCH_WANT_SYS_PAUSE -asmlinkage long -sys_pause(void) +SYSCALL_DEFINE0(pause) { current->state = TASK_INTERRUPTIBLE; schedule(); @@ -2594,7 +2701,7 @@ sys_pause(void) #endif #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) +SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) { sigset_t newset; @@ -2614,7 +2721,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) current->state = TASK_INTERRUPTIBLE; schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); + set_restore_sigmask(); return -ERESTARTNOHAND; } #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ @@ -2626,9 +2733,5 @@ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) void __init signals_init(void) { - sigqueue_cachep = - kmem_cache_create("sigqueue", - sizeof(struct sigqueue), - __alignof__(struct sigqueue), - SLAB_PANIC, NULL, NULL); + sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); }
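[Editor's note, not part of the patch: the do_sigaltstack() hunks above only restructure how the old stack_t is copied back to userspace and how the new one is read in (separate __get_user()/__put_user() calls instead of a single copy); the semantics of installing an alternate signal stack are unchanged. A minimal, hypothetical userspace sketch of the interface those checks serve (illustrative program only, not from the kernel tree):]

/* altstack_demo.c - hypothetical example program (not part of this patch).
 * Installs an alternate signal stack with sigaltstack() and a handler
 * marked SA_ONSTACK, the userspace side of the do_sigaltstack() path. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_signal(int sig)
{
	/* Runs on the alternate stack.  printf() is not async-signal-safe;
	 * it is used here only to keep the demonstration short. */
	printf("caught signal %d on the alternate stack\n", sig);
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);	/* the kernel rejects stacks below MINSIGSTKSZ */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) != 0) {
		perror("sigaltstack");
		return 1;
	}

	sa.sa_handler = on_signal;
	sa.sa_flags = SA_ONSTACK;	/* deliver this signal on the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);			/* handler runs on ss.ss_sp, then exits */
	return 0;
}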