X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fsignal.c;h=6705320784fd2b07a518c4d2bb47520d0b342aa8;hb=f5dc37530ba8a35aae0f7f4f13781d1904f71e94;hp=115c04f3f143dda03fbd48964b33f1e85f8f85f1;hpb=e442055193e4584218006e616c9bdce0c5e9ae5c;p=safe%2Fjmp%2Flinux-2.6 diff --git a/kernel/signal.c b/kernel/signal.c index 115c04f..6705320 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -22,10 +22,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include @@ -39,25 +41,34 @@ static struct kmem_cache *sigqueue_cachep; -static int __sig_ignored(struct task_struct *t, int sig) +static void __user *sig_handler(struct task_struct *t, int sig) { - void __user *handler; + return t->sighand->action[sig - 1].sa.sa_handler; +} +static int sig_handler_ignored(void __user *handler, int sig) +{ /* Is it explicitly or implicitly ignored? */ - - handler = t->sighand->action[sig - 1].sa.sa_handler; return handler == SIG_IGN || (handler == SIG_DFL && sig_kernel_ignore(sig)); } -static int sig_ignored(struct task_struct *t, int sig) +static int sig_task_ignored(struct task_struct *t, int sig, + int from_ancestor_ns) { - /* - * Tracers always want to know about signals.. - */ - if (t->ptrace & PT_PTRACED) - return 0; + void __user *handler; + handler = sig_handler(t, sig); + + if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && + handler == SIG_DFL && !from_ancestor_ns) + return 1; + + return sig_handler_ignored(handler, sig); +} + +static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) +{ /* * Blocked signals are never ignored, since the * signal handler may change by the time it is @@ -66,7 +77,13 @@ static int sig_ignored(struct task_struct *t, int sig) if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) return 0; - return __sig_ignored(t, sig); + if (!sig_task_ignored(t, sig, from_ancestor_ns)) + return 0; + + /* + * Tracers may want to know about even ignored signals. 
+ */ + return !tracehook_consider_ignored_signal(t, sig); } /* @@ -129,7 +146,9 @@ void recalc_sigpending_and_wake(struct task_struct *t) void recalc_sigpending(void) { - if (!recalc_sigpending_tsk(current) && !freezing(current)) + if (unlikely(tracehook_force_sigpending())) + set_thread_flag(TIF_SIGPENDING); + else if (!recalc_sigpending_tsk(current) && !freezing(current)) clear_thread_flag(TIF_SIGPENDING); } @@ -169,6 +188,11 @@ int next_signal(struct sigpending *pending, sigset_t *mask) return sig; } +/* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appopriate lock must be held to stop the target task from exiting + */ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, int override_rlimit) { @@ -176,11 +200,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, struct user_struct *user; /* - * In order to avoid problems with "switch_user()", we want to make - * sure that the compiler doesn't re-load "t->user" + * We won't get problems with the target's UID changing under us + * because changing it requires RCU be used, and if t != current, the + * caller must be holding the RCU readlock (by way of a spinlock) and + * we use RCU protection here */ - user = t->user; - barrier(); + user = get_uid(__task_cred(t)->user); atomic_inc(&user->sigpending); if (override_rlimit || atomic_read(&user->sigpending) <= @@ -188,12 +213,14 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, q = kmem_cache_alloc(sigqueue_cachep, flags); if (unlikely(q == NULL)) { atomic_dec(&user->sigpending); + free_uid(user); } else { INIT_LIST_HEAD(&q->list); q->flags = 0; - q->user = get_uid(user); + q->user = user; } - return(q); + + return q; } static void __sigqueue_free(struct sigqueue *q) @@ -220,17 +247,56 @@ void flush_sigqueue(struct sigpending *queue) /* * Flush all pending signals for a task. 
*/ +void __flush_signals(struct task_struct *t) +{ + clear_tsk_thread_flag(t, TIF_SIGPENDING); + flush_sigqueue(&t->pending); + flush_sigqueue(&t->signal->shared_pending); +} + void flush_signals(struct task_struct *t) { unsigned long flags; spin_lock_irqsave(&t->sighand->siglock, flags); - clear_tsk_thread_flag(t, TIF_SIGPENDING); - flush_sigqueue(&t->pending); - flush_sigqueue(&t->signal->shared_pending); + __flush_signals(t); spin_unlock_irqrestore(&t->sighand->siglock, flags); } +static void __flush_itimer_signals(struct sigpending *pending) +{ + sigset_t signal, retain; + struct sigqueue *q, *n; + + signal = pending->signal; + sigemptyset(&retain); + + list_for_each_entry_safe(q, n, &pending->list, list) { + int sig = q->info.si_signo; + + if (likely(q->info.si_code != SI_TIMER)) { + sigaddset(&retain, sig); + } else { + sigdelset(&signal, sig); + list_del_init(&q->list); + __sigqueue_free(q); + } + } + + sigorsets(&pending->signal, &signal, &retain); +} + +void flush_itimer_signals(void) +{ + struct task_struct *tsk = current; + unsigned long flags; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + __flush_itimer_signals(&tsk->pending); + __flush_itimer_signals(&tsk->signal->shared_pending); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); +} + void ignore_signals(struct task_struct *t) { int i; @@ -261,12 +327,12 @@ flush_signal_handlers(struct task_struct *t, int force_default) int unhandled_signal(struct task_struct *tsk, int sig) { + void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; if (is_global_init(tsk)) return 1; - if (tsk->ptrace & PT_PTRACED) + if (handler != SIG_IGN && handler != SIG_DFL) return 0; - return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || - (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL); + return !tracehook_consider_fatal_signal(tsk, sig); } @@ -304,13 +370,9 @@ unblock_all_signals(void) spin_unlock_irqrestore(¤t->sighand->siglock, flags); } -static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) { struct sigqueue *q, *first = NULL; - int still_pending = 0; - - if (unlikely(!sigismember(&list->signal, sig))) - return 0; /* * Collect the siginfo appropriate to this signal. Check if @@ -318,33 +380,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) */ list_for_each_entry(q, &list->list, list) { if (q->info.si_signo == sig) { - if (first) { - still_pending = 1; - break; - } + if (first) + goto still_pending; first = q; } } + + sigdelset(&list->signal, sig); + if (first) { +still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); __sigqueue_free(first); - if (!still_pending) - sigdelset(&list->signal, sig); } else { - /* Ok, it wasn't in the queue. This must be a fast-pathed signal or we must have been out of queue space. So zero out the info. */ - sigdelset(&list->signal, sig); info->si_signo = sig; info->si_errno = 0; info->si_code = 0; info->si_pid = 0; info->si_uid = 0; } - return 1; } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, @@ -362,8 +421,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, } } - if (!collect_signal(sig, pending, info)) - sig = 0; + collect_signal(sig, pending, info); } return sig; @@ -428,8 +486,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * is to alert stop-signal processing code when another * processor has come along and cleared the flag. 
*/ - if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; } if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { /* @@ -529,10 +586,13 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s) /* * Bad permissions for sending the signal + * - the caller must hold at least the RCU read lock */ static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) { + const struct cred *cred = current_cred(), *tcred; + struct pid *sid; int error; if (!valid_signal(sig)) @@ -545,73 +605,68 @@ static int check_kill_permission(int sig, struct siginfo *info, if (error) return error; - if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t))) - && (current->euid ^ t->suid) && (current->euid ^ t->uid) - && (current->uid ^ t->suid) && (current->uid ^ t->uid) - && !capable(CAP_KILL)) - return -EPERM; + tcred = __task_cred(t); + if ((cred->euid ^ tcred->suid) && + (cred->euid ^ tcred->uid) && + (cred->uid ^ tcred->suid) && + (cred->uid ^ tcred->uid) && + !capable(CAP_KILL)) { + switch (sig) { + case SIGCONT: + sid = task_session(t); + /* + * We don't return the error if sid == NULL. The + * task was unhashed, the caller must notice this. + */ + if (!sid || sid == task_session(current)) + break; + default: + return -EPERM; + } + } return security_task_kill(t, info, sig, 0); } -/* forward decl */ -static void do_notify_parent_cldstop(struct task_struct *tsk, int why); - /* - * Handle magic process-wide effects of stop/continue signals. - * Unlike the signal actions, these happen immediately at signal-generation + * Handle magic process-wide effects of stop/continue signals. Unlike + * the signal actions, these happen immediately at signal-generation * time regardless of blocking, ignoring, or handling. This does the * actual continuing for SIGCONT, but not the actual stopping for stop - * signals. The process stop is done as a signal action for SIG_DFL. + * signals. The process stop is done as a signal action for SIG_DFL. + * + * Returns true if the signal should be actually delivered, otherwise + * it should be dropped. */ -static void handle_stop_signal(int sig, struct task_struct *p) +static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) { + struct signal_struct *signal = p->signal; struct task_struct *t; - if (p->signal->flags & SIGNAL_GROUP_EXIT) + if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { /* - * The process is in the middle of dying already. + * The process is in the middle of dying, nothing to do. */ - return; - - if (sig_kernel_stop(sig)) { + } else if (sig_kernel_stop(sig)) { /* * This is a stop signal. Remove SIGCONT from all queues. */ - rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending); + rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); t = p; do { rm_from_queue(sigmask(SIGCONT), &t->pending); - t = next_thread(t); - } while (t != p); + } while_each_thread(p, t); } else if (sig == SIGCONT) { + unsigned int why; /* * Remove all stop signals from all queues, * and wake all threads. */ - if (unlikely(p->signal->group_stop_count > 0)) { - /* - * There was a group stop in progress. We'll - * pretend it finished before we got here. We are - * obliged to report it to the parent: if the - * SIGSTOP happened "after" this SIGCONT, then it - * would have cleared this pending SIGCONT. 
If it - * happened "before" this SIGCONT, then the parent - * got the SIGCHLD about the stop finishing before - * the continue happened. We do the notification - * now, and it's as if the stop had finished and - * the SIGCHLD was pending on entry to this kill. - */ - p->signal->group_stop_count = 0; - p->signal->flags = SIGNAL_STOP_CONTINUED | - SIGNAL_CLD_STOPPED; - } - rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); + rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); t = p; do { unsigned int state; rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); - /* * If there is a handler for SIGCONT, we must make * sure that no thread returns to user mode before @@ -621,7 +676,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) * running the handler. With the TIF_SIGPENDING * flag set, the thread will pause and acquire the * siglock that we hold now and until we've queued - * the pending signal. + * the pending signal. * * Wake up the stopped thread _after_ setting * TIF_SIGPENDING @@ -632,33 +687,139 @@ static void handle_stop_signal(int sig, struct task_struct *p) state |= TASK_INTERRUPTIBLE; } wake_up_state(t, state); + } while_each_thread(p, t); - t = next_thread(t); - } while (t != p); + /* + * Notify the parent with CLD_CONTINUED if we were stopped. + * + * If we were in the middle of a group stop, we pretend it + * was already finished, and then continued. Since SIGCHLD + * doesn't queue we report only CLD_STOPPED, as if the next + * CLD_CONTINUED was dropped. + */ + why = 0; + if (signal->flags & SIGNAL_STOP_STOPPED) + why |= SIGNAL_CLD_CONTINUED; + else if (signal->group_stop_count) + why |= SIGNAL_CLD_STOPPED; - if (p->signal->flags & SIGNAL_STOP_STOPPED) { + if (why) { /* - * We were in fact stopped, and are now continued. - * Notify the parent with CLD_CONTINUED. + * The first thread which returns from do_signal_stop() + * will take ->siglock, notice SIGNAL_CLD_MASK, and + * notify its parent. See get_signal_to_deliver(). */ - p->signal->flags = SIGNAL_STOP_CONTINUED | - SIGNAL_CLD_CONTINUED; - p->signal->group_exit_code = 0; + signal->flags = why | SIGNAL_STOP_CONTINUED; + signal->group_stop_count = 0; + signal->group_exit_code = 0; } else { /* * We are not stopped, but there could be a stop * signal in the middle of being processed after * being removed from the queue. Clear that too. */ - p->signal->flags &= ~SIGNAL_STOP_DEQUEUED; + signal->flags &= ~SIGNAL_STOP_DEQUEUED; + } + } + + return !sig_ignored(p, sig, from_ancestor_ns); +} + +/* + * Test if P wants to take SIG. After we've checked all threads with this, + * it's equivalent to finding no threads not blocking SIG. Any threads not + * blocking SIG were ruled out because they are not running and already + * have pending signals. Such threads will dequeue from the shared queue + * as soon as they're available, so putting the signal on the shared queue + * will be equivalent to sending it to one such thread. + */ +static inline int wants_signal(int sig, struct task_struct *p) +{ + if (sigismember(&p->blocked, sig)) + return 0; + if (p->flags & PF_EXITING) + return 0; + if (sig == SIGKILL) + return 1; + if (task_is_stopped_or_traced(p)) + return 0; + return task_curr(p) || !signal_pending(p); +} + +static void complete_signal(int sig, struct task_struct *p, int group) +{ + struct signal_struct *signal = p->signal; + struct task_struct *t; + + /* + * Now find a thread we can wake up to take the signal off the queue. + * + * If the main thread wants the signal, it gets first crack. 
+ * Probably the least surprising to the average bear. + */ + if (wants_signal(sig, p)) + t = p; + else if (!group || thread_group_empty(p)) + /* + * There is just one thread and it does not need to be woken. + * It will dequeue unblocked signals before it runs again. + */ + return; + else { + /* + * Otherwise try to find a suitable thread. + */ + t = signal->curr_target; + while (!wants_signal(sig, t)) { + t = next_thread(t); + if (t == signal->curr_target) + /* + * No thread needs to be woken. + * Any eligible threads will see + * the signal in the queue soon. + */ + return; } - } else if (sig == SIGKILL) { + signal->curr_target = t; + } + + /* + * Found a killable thread. If the signal will be fatal, + * then start taking the whole group down immediately. + */ + if (sig_fatal(p, sig) && + !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && + !sigismember(&t->real_blocked, sig) && + (sig == SIGKILL || + !tracehook_consider_fatal_signal(t, sig))) { /* - * Make sure that any pending stop signal already dequeued - * is undone by the wakeup for SIGKILL. + * This signal will be fatal to the whole group. */ - p->signal->flags &= ~SIGNAL_STOP_DEQUEUED; + if (!sig_kernel_coredump(sig)) { + /* + * Start a group exit and wake everybody up. + * This way we don't have other threads + * running and doing things after a slower + * thread has the fatal signal pending. + */ + signal->flags = SIGNAL_GROUP_EXIT; + signal->group_exit_code = sig; + signal->group_stop_count = 0; + t = p; + do { + sigaddset(&t->pending.signal, SIGKILL); + signal_wake_up(t, 1); + } while_each_thread(p, t); + return; + } } + + /* + * The signal is already in the shared-pending queue. + * Tell the chosen thread to wake up and dequeue it. + */ + signal_wake_up(t, sig == SIGKILL); + return; } static inline int legacy_queue(struct sigpending *signals, int sig) @@ -666,25 +827,28 @@ static inline int legacy_queue(struct sigpending *signals, int sig) return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); } -static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - struct sigpending *signals) +static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, + int group, int from_ancestor_ns) { - struct sigqueue * q = NULL; + struct sigpending *pending; + struct sigqueue *q; + int override_rlimit; + + trace_sched_signal_send(sig, t); + + assert_spin_locked(&t->sighand->siglock); + + if (!prepare_signal(sig, t, from_ancestor_ns)) + return 0; + pending = group ? &t->signal->shared_pending : &t->pending; /* * Short-circuit ignored signals and support queuing * exactly one non-rt signal, so that we can get more * detailed information about the cause of the signal. */ - if (sig_ignored(t, sig) || legacy_queue(signals, sig)) + if (legacy_queue(pending, sig)) return 0; - - /* - * Deliver the signal to listening signalfds. This must be called - * with the sighand lock held. - */ - signalfd_notify(t, sig); - /* * fast-pathed signals for kernel-internal things like SIGSTOP * or SIGKILL. @@ -700,18 +864,23 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, make sure at least one signal gets delivered and don't pass on the info struct. 
*/ - q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - (is_si_special(info) || - info->si_code >= 0))); + if (sig < SIGRTMIN) + override_rlimit = (is_si_special(info) || info->si_code >= 0); + else + override_rlimit = 0; + + q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, + override_rlimit); if (q) { - list_add_tail(&q->list, &signals->list); + list_add_tail(&q->list, &pending->list); switch ((unsigned long) info) { case (unsigned long) SEND_SIG_NOINFO: q->info.si_signo = sig; q->info.si_errno = 0; q->info.si_code = SI_USER; - q->info.si_pid = task_pid_vnr(current); - q->info.si_uid = current->uid; + q->info.si_pid = task_tgid_nr_ns(current, + task_active_pid_ns(t)); + q->info.si_uid = current_uid(); break; case (unsigned long) SEND_SIG_PRIV: q->info.si_signo = sig; @@ -722,6 +891,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, break; default: copy_siginfo(&q->info, info); + if (from_ancestor_ns) + q->info.si_pid = 0; break; } } else if (!is_si_special(info)) { @@ -734,8 +905,24 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, } out_set: - sigaddset(&signals->signal, sig); - return 1; + signalfd_notify(t, sig); + sigaddset(&pending->signal, sig); + complete_signal(sig, t, group); + return 0; +} + +static int send_signal(int sig, struct siginfo *info, struct task_struct *t, + int group) +{ + int from_ancestor_ns = 0; + +#ifdef CONFIG_PID_NS + if (!is_si_special(info) && SI_FROMUSER(info) && + task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0) + from_ancestor_ns = 1; +#endif + + return __send_signal(sig, info, t, group, from_ancestor_ns); } int print_fatal_signals; @@ -758,7 +945,9 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) } #endif printk("\n"); + preempt_disable(); show_regs(regs); + preempt_enable(); } static int __init setup_print_fatal_signals(char *str) @@ -770,21 +959,30 @@ static int __init setup_print_fatal_signals(char *str) __setup("print-fatal-signals=", setup_print_fatal_signals); +int +__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +{ + return send_signal(sig, info, p, 1); +} + static int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) { - int ret; + return send_signal(sig, info, t, 0); +} - BUG_ON(!irqs_disabled()); - assert_spin_locked(&t->sighand->siglock); +int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, + bool group) +{ + unsigned long flags; + int ret = -ESRCH; - ret = send_signal(sig, info, t, &t->pending); - if (ret <= 0) - return ret; + if (lock_task_sighand(p, &flags)) { + ret = send_signal(sig, info, p, group); + unlock_task_sighand(p, &flags); + } - if (!sigismember(&t->blocked, sig)) - signal_wake_up(t, sig == SIGKILL); - return 0; + return ret; } /* @@ -795,7 +993,8 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) * since we do not want to have a signal handler that was blocked * be invoked when user space had explicitly blocked it. * - * We don't want to have recursive SIGSEGV's etc, for example. + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. 
*/ int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) @@ -815,6 +1014,8 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) recalc_sigpending_and_wake(t); } } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; ret = specific_send_sig_info(sig, info, t); spin_unlock_irqrestore(&t->sighand->siglock, flags); @@ -828,126 +1029,6 @@ force_sig_specific(int sig, struct task_struct *t) } /* - * Test if P wants to take SIG. After we've checked all threads with this, - * it's equivalent to finding no threads not blocking SIG. Any threads not - * blocking SIG were ruled out because they are not running and already - * have pending signals. Such threads will dequeue from the shared queue - * as soon as they're available, so putting the signal on the shared queue - * will be equivalent to sending it to one such thread. - */ -static inline int wants_signal(int sig, struct task_struct *p) -{ - if (sigismember(&p->blocked, sig)) - return 0; - if (p->flags & PF_EXITING) - return 0; - if (sig == SIGKILL) - return 1; - if (task_is_stopped_or_traced(p)) - return 0; - return task_curr(p) || !signal_pending(p); -} - -static void -__group_complete_signal(int sig, struct task_struct *p) -{ - struct task_struct *t; - - /* - * Now find a thread we can wake up to take the signal off the queue. - * - * If the main thread wants the signal, it gets first crack. - * Probably the least surprising to the average bear. - */ - if (wants_signal(sig, p)) - t = p; - else if (thread_group_empty(p)) - /* - * There is just one thread and it does not need to be woken. - * It will dequeue unblocked signals before it runs again. - */ - return; - else { - /* - * Otherwise try to find a suitable thread. - */ - t = p->signal->curr_target; - if (t == NULL) - /* restart balancing at this thread */ - t = p->signal->curr_target = p; - - while (!wants_signal(sig, t)) { - t = next_thread(t); - if (t == p->signal->curr_target) - /* - * No thread needs to be woken. - * Any eligible threads will see - * the signal in the queue soon. - */ - return; - } - p->signal->curr_target = t; - } - - /* - * Found a killable thread. If the signal will be fatal, - * then start taking the whole group down immediately. - */ - if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) && - !sigismember(&t->real_blocked, sig) && - (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { - /* - * This signal will be fatal to the whole group. - */ - if (!sig_kernel_coredump(sig)) { - /* - * Start a group exit and wake everybody up. - * This way we don't have other threads - * running and doing things after a slower - * thread has the fatal signal pending. - */ - p->signal->flags = SIGNAL_GROUP_EXIT; - p->signal->group_exit_code = sig; - p->signal->group_stop_count = 0; - t = p; - do { - sigaddset(&t->pending.signal, SIGKILL); - signal_wake_up(t, 1); - } while_each_thread(p, t); - return; - } - } - - /* - * The signal is already in the shared-pending queue. - * Tell the chosen thread to wake up and dequeue it. - */ - signal_wake_up(t, sig == SIGKILL); - return; -} - -int -__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) -{ - int ret; - - assert_spin_locked(&p->sighand->siglock); - handle_stop_signal(sig, p); - - /* - * Put this signal on the shared-pending queue, or fail with EAGAIN. - * We always use the shared queue for process-wide signals, - * to avoid several races. 
- */ - ret = send_signal(sig, info, p, &p->signal->shared_pending); - if (ret <= 0) - return ret; - - __group_complete_signal(sig, p); - return 0; -} - -/* * Nuke all other threads in the group. */ void zap_other_threads(struct task_struct *p) @@ -969,12 +1050,6 @@ void zap_other_threads(struct task_struct *p) } } -int __fatal_signal_pending(struct task_struct *tsk) -{ - return sigismember(&tsk->pending.signal, SIGKILL); -} -EXPORT_SYMBOL(__fatal_signal_pending); - struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) { struct sighand_struct *sighand; @@ -995,20 +1070,16 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long return sighand; } +/* + * send signal info to all the members of a group + * - the caller must hold the RCU read lock at least + */ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - unsigned long flags; - int ret; + int ret = check_kill_permission(sig, info, p); - ret = check_kill_permission(sig, info, p); - - if (!ret && sig) { - ret = -ESRCH; - if (lock_task_sighand(p, &flags)) { - ret = __group_send_sig_info(sig, info, p); - unlock_task_sighand(p, &flags); - } - } + if (!ret && sig) + ret = do_send_sig_info(sig, info, p, true); return ret; } @@ -1016,8 +1087,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) /* * __kill_pgrp_info() sends a signal to a process group: this is what the tty * control characters do (^C, ^Z etc) + * - the caller must hold at least a readlock on tasklist_lock */ - int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; @@ -1039,9 +1110,6 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) struct task_struct *p; rcu_read_lock(); - if (unlikely(sig_needs_tasklist(sig))) - read_lock(&tasklist_lock); - retry: p = pid_task(pid, PIDTYPE_PID); if (p) { @@ -1055,10 +1123,8 @@ retry: */ goto retry; } - - if (unlikely(sig_needs_tasklist(sig))) - read_unlock(&tasklist_lock); rcu_read_unlock(); + return error; } @@ -1078,6 +1144,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, { int ret = -EINVAL; struct task_struct *p; + const struct cred *pcred; if (!valid_signal(sig)) return ret; @@ -1088,9 +1155,11 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, ret = -ESRCH; goto out_unlock; } - if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) - && (euid != p->suid) && (euid != p->uid) - && (uid != p->suid) && (uid != p->uid)) { + pcred = __task_cred(p); + if ((info == SEND_SIG_NOINFO || + (!is_si_special(info) && SI_FROMUSER(info))) && + euid != pcred->suid && euid != pcred->uid && + uid != pcred->suid && uid != pcred->uid) { ret = -EPERM; goto out_unlock; } @@ -1100,7 +1169,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, if (sig && p->sighand) { unsigned long flags; spin_lock_irqsave(&p->sighand->siglock, flags); - ret = __group_send_sig_info(sig, info, p); + ret = __send_signal(sig, info, p, 1, 0); spin_unlock_irqrestore(&p->sighand->siglock, flags); } out_unlock: @@ -1116,7 +1185,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); * is probably wrong. Should make it like BSD or SYSV. 
*/ -static int kill_something_info(int sig, struct siginfo *info, int pid) +static int kill_something_info(int sig, struct siginfo *info, pid_t pid) { int ret; @@ -1136,7 +1205,8 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) struct task_struct * p; for_each_process(p) { - if (p->pid > 1 && !same_thread_group(p, current)) { + if (task_pid_vnr(p) > 1 && + !same_thread_group(p, current)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) @@ -1154,16 +1224,9 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) * These are for backward compatibility with the rest of the kernel source. */ -/* - * These two are the most common entry points. They send a signal - * just to the specific thread. - */ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - int ret; - unsigned long flags; - /* * Make sure legacy kernel users don't send in bad values * (normal paths check this in check_kill_permission). @@ -1171,18 +1234,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p) if (!valid_signal(sig)) return -EINVAL; - /* - * We need the tasklist lock even for the specific - * thread case (when we don't need to follow the group - * lists) in order to avoid races with "p->sighand" - * going away or changing from under us. - */ - read_lock(&tasklist_lock); - spin_lock_irqsave(&p->sighand->siglock, flags); - ret = specific_send_sig_info(sig, info, p); - spin_unlock_irqrestore(&p->sighand->siglock, flags); - read_unlock(&tasklist_lock); - return ret; + return do_send_sig_info(sig, info, p, false); } #define __si_special(priv) \ @@ -1237,17 +1289,6 @@ int kill_pid(struct pid *pid, int sig, int priv) } EXPORT_SYMBOL(kill_pid); -int -kill_proc(pid_t pid, int sig, int priv) -{ - int ret; - - rcu_read_lock(); - ret = kill_pid_info(sig, __si_special(priv), find_pid(pid)); - rcu_read_unlock(); - return ret; -} - /* * These functions support sending signals using preallocated sigqueue * structures. This is needed "because realtime applications cannot @@ -1274,122 +1315,84 @@ void sigqueue_free(struct sigqueue *q) BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); /* - * If the signal is still pending remove it from the - * pending queue. We must hold ->siglock while testing - * q->list to serialize with collect_signal(). + * We must hold ->siglock while testing q->list + * to serialize with collect_signal() or with + * __exit_signal()->flush_sigqueue(). */ spin_lock_irqsave(lock, flags); + q->flags &= ~SIGQUEUE_PREALLOC; + /* + * If it is queued it will be freed when dequeued, + * like the "regular" sigqueue. + */ if (!list_empty(&q->list)) - list_del_init(&q->list); + q = NULL; spin_unlock_irqrestore(lock, flags); - q->flags &= ~SIGQUEUE_PREALLOC; - __sigqueue_free(q); + if (q) + __sigqueue_free(q); } -static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t, - struct sigpending *pending) +int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) { + int sig = q->info.si_signo; + struct sigpending *pending; + unsigned long flags; + int ret; + + BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); + + ret = -1; + if (!likely(lock_task_sighand(t, &flags))) + goto ret; + + ret = 1; /* the signal is ignored */ + if (!prepare_signal(sig, t, 0)) + goto out; + + ret = 0; if (unlikely(!list_empty(&q->list))) { /* * If an SI_TIMER entry is already queue just increment * the overrun count. 
*/ - BUG_ON(q->info.si_code != SI_TIMER); q->info.si_overrun++; - return 0; + goto out; } - - if (sig_ignored(t, sig)) - return 1; + q->info.si_overrun = 0; signalfd_notify(t, sig); + pending = group ? &t->signal->shared_pending : &t->pending; list_add_tail(&q->list, &pending->list); sigaddset(&pending->signal, sig); - return 0; -} - -int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) -{ - unsigned long flags; - int ret = -1; - - BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); - - /* - * The rcu based delayed sighand destroy makes it possible to - * run this without tasklist lock held. The task struct itself - * cannot go away as create_timer did get_task_struct(). - * - * We return -1, when the task is marked exiting, so - * posix_timer_event can redirect it to the group leader - */ - rcu_read_lock(); - - if (!likely(lock_task_sighand(p, &flags))) - goto out_err; - - ret = do_send_sigqueue(sig, q, p, &p->pending); - - if (!sigismember(&p->blocked, sig)) - signal_wake_up(p, sig == SIGKILL); - - unlock_task_sighand(p, &flags); -out_err: - rcu_read_unlock(); - - return ret; -} - -int -send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) -{ - unsigned long flags; - int ret; - - BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); - - read_lock(&tasklist_lock); - /* Since it_lock is held, p->sighand cannot be NULL. */ - spin_lock_irqsave(&p->sighand->siglock, flags); - handle_stop_signal(sig, p); - - ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending); - - __group_complete_signal(sig, p); - - spin_unlock_irqrestore(&p->sighand->siglock, flags); - read_unlock(&tasklist_lock); + complete_signal(sig, t, group); +out: + unlock_task_sighand(t, &flags); +ret: return ret; } /* - * Wake up any threads in the parent blocked in wait* syscalls. - */ -static inline void __wake_up_parent(struct task_struct *p, - struct task_struct *parent) -{ - wake_up_interruptible_sync(&parent->signal->wait_chldexit); -} - -/* * Let a parent know about the death of a child. * For a stopped/continued status change, use do_notify_parent_cldstop instead. + * + * Returns -1 if our parent ignored us and so we've switched to + * self-reaping, or else @sig. */ - -void do_notify_parent(struct task_struct *tsk, int sig) +int do_notify_parent(struct task_struct *tsk, int sig) { struct siginfo info; unsigned long flags; struct sighand_struct *psig; + int ret = sig; BUG_ON(sig == -1); /* do_notify_parent_cldstop should have been called instead. */ BUG_ON(task_is_stopped_or_traced(tsk)); - BUG_ON(!tsk->ptrace && + BUG_ON(!task_ptrace(tsk) && (tsk->group_leader != tsk || !thread_group_empty(tsk))); info.si_signo = sig; @@ -1408,15 +1411,13 @@ void do_notify_parent(struct task_struct *tsk, int sig) */ rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); - info.si_uid = tsk->uid; - - /* FIXME: find out whether or not this is supposed to be c*time. 
*/ - info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime, - tsk->signal->utime)); - info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, - tsk->signal->stime)); + info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, + tsk->signal->utime)); + info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, + tsk->signal->stime)); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) @@ -1430,7 +1431,7 @@ void do_notify_parent(struct task_struct *tsk, int sig) psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); - if (!tsk->ptrace && sig == SIGCHLD && + if (!task_ptrace(tsk) && sig == SIGCHLD && (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* @@ -1448,14 +1449,16 @@ void do_notify_parent(struct task_struct *tsk, int sig) * is implementation-defined: we do (if you don't want * it, just use SIG_IGN instead). */ - tsk->exit_signal = -1; + ret = tsk->exit_signal = -1; if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) - sig = 0; + sig = -1; } if (valid_signal(sig) && sig > 0) __group_send_sig_info(sig, &info, tsk->parent); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); + + return ret; } static void do_notify_parent_cldstop(struct task_struct *tsk, int why) @@ -1465,7 +1468,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) struct task_struct *parent; struct sighand_struct *sighand; - if (tsk->ptrace & PT_PTRACED) + if (task_ptrace(tsk)) parent = tsk->parent; else { tsk = tsk->group_leader; @@ -1478,14 +1481,12 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) * see comment in do_notify_parent() abot the following 3 lines */ rcu_read_lock(); - info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); + info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); - info.si_uid = tsk->uid; - - /* FIXME: find out whether or not this is supposed to be c*time. */ - info.si_utime = cputime_to_jiffies(tsk->utime); - info.si_stime = cputime_to_jiffies(tsk->stime); + info.si_utime = cputime_to_clock_t(tsk->utime); + info.si_stime = cputime_to_clock_t(tsk->stime); info.si_code = why; switch (why) { @@ -1516,7 +1517,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) static inline int may_ptrace_stop(void) { - if (!likely(current->ptrace & PT_PTRACED)) + if (!likely(task_ptrace(current))) return 0; /* * Are we in the middle of do_coredump? @@ -1524,10 +1525,10 @@ static inline int may_ptrace_stop(void) * is a deadlock situation, and pointless because our tracer * is dead so don't allow us to stop. * If SIGKILL was already sent before the caller unlocked - * ->siglock we must see ->core_waiters != 0. Otherwise it + * ->siglock we must see ->core_state != NULL. Otherwise it * is safe to enter schedule(). 
*/ - if (unlikely(current->mm->core_waiters) && + if (unlikely(current->mm->core_state) && unlikely(current->mm == current->parent->mm)) return 0; @@ -1540,9 +1541,8 @@ static inline int may_ptrace_stop(void) */ static int sigkill_pending(struct task_struct *tsk) { - return ((sigismember(&tsk->pending.signal, SIGKILL) || - sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) && - !unlikely(sigismember(&tsk->blocked, SIGKILL))); + return sigismember(&tsk->pending.signal, SIGKILL) || + sigismember(&tsk->signal->shared_pending.signal, SIGKILL); } /* @@ -1558,8 +1558,6 @@ static int sigkill_pending(struct task_struct *tsk) */ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) { - int killed = 0; - if (arch_ptrace_stop_needed(exit_code, info)) { /* * The arch code has something special to do before a @@ -1575,7 +1573,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) spin_unlock_irq(¤t->sighand->siglock); arch_ptrace_stop(exit_code, info); spin_lock_irq(¤t->sighand->siglock); - killed = sigkill_pending(current); + if (sigkill_pending(current)) + return; } /* @@ -1592,9 +1591,17 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) __set_current_state(TASK_TRACED); spin_unlock_irq(¤t->sighand->siglock); read_lock(&tasklist_lock); - if (!unlikely(killed) && may_ptrace_stop()) { + if (may_ptrace_stop()) { do_notify_parent_cldstop(current, CLD_TRAPPED); + /* + * Don't want to allow preemption here, because + * sys_ptrace() needs this task to be inactive. + * + * XXX: implement read_unlock_no_resched(). + */ + preempt_disable(); read_unlock(&tasklist_lock); + preempt_enable_no_resched(); schedule(); } else { /* @@ -1640,7 +1647,7 @@ void ptrace_notify(int exit_code) info.si_signo = SIGTRAP; info.si_code = exit_code; info.si_pid = task_pid_vnr(current); - info.si_uid = current->uid; + info.si_uid = current_uid(); /* Let the debugger run. */ spin_lock_irq(¤t->sighand->siglock); @@ -1648,29 +1655,6 @@ void ptrace_notify(int exit_code) spin_unlock_irq(¤t->sighand->siglock); } -static void -finish_stop(int stop_count) -{ - /* - * If there are no other threads in the group, or if there is - * a group stop in progress and we are the last to stop, - * report to the parent. When ptraced, every thread reports itself. - */ - if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { - read_lock(&tasklist_lock); - do_notify_parent_cldstop(current, CLD_STOPPED); - read_unlock(&tasklist_lock); - } - - do { - schedule(); - } while (try_to_freeze()); - /* - * Now we don't run again until continued. - */ - current->exit_code = 0; -} - /* * This performs the stopping for SIGSTOP and other stop signals. * We have to stop all threads in the thread group. @@ -1680,15 +1664,9 @@ finish_stop(int stop_count) static int do_signal_stop(int signr) { struct signal_struct *sig = current->signal; - int stop_count; + int notify; - if (sig->group_stop_count > 0) { - /* - * There is a group stop in progress. We don't need to - * start another one. 
- */ - stop_count = --sig->group_stop_count; - } else { + if (!sig->group_stop_count) { struct task_struct *t; if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || @@ -1700,7 +1678,7 @@ static int do_signal_stop(int signr) */ sig->group_exit_code = signr; - stop_count = 0; + sig->group_stop_count = 1; for (t = next_thread(current); t != current; t = next_thread(t)) /* * Setting state to TASK_STOPPED for a group @@ -1709,26 +1687,51 @@ static int do_signal_stop(int signr) */ if (!(t->flags & PF_EXITING) && !task_is_stopped_or_traced(t)) { - stop_count++; + sig->group_stop_count++; signal_wake_up(t, 0); } - sig->group_stop_count = stop_count; } + /* + * If there are no other threads in the group, or if there is + * a group stop in progress and we are the last to stop, report + * to the parent. When ptraced, every thread reports itself. + */ + notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; + notify = tracehook_notify_jctl(notify, CLD_STOPPED); + /* + * tracehook_notify_jctl() can drop and reacquire siglock, so + * we keep ->group_stop_count != 0 before the call. If SIGCONT + * or SIGKILL comes in between ->group_stop_count == 0. + */ + if (sig->group_stop_count) { + if (!--sig->group_stop_count) + sig->flags = SIGNAL_STOP_STOPPED; + current->exit_code = sig->group_exit_code; + __set_current_state(TASK_STOPPED); + } + spin_unlock_irq(¤t->sighand->siglock); - if (stop_count == 0) - sig->flags = SIGNAL_STOP_STOPPED; - current->exit_code = sig->group_exit_code; - __set_current_state(TASK_STOPPED); + if (notify) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current, notify); + read_unlock(&tasklist_lock); + } + + /* Now we don't run again until woken by SIGCONT or SIGKILL */ + do { + schedule(); + } while (try_to_freeze()); + + tracehook_finish_jctl(); + current->exit_code = 0; - spin_unlock_irq(¤t->sighand->siglock); - finish_stop(stop_count); return 1; } static int ptrace_signal(int signr, siginfo_t *info, struct pt_regs *regs, void *cookie) { - if (!(current->ptrace & PT_PTRACED)) + if (!task_ptrace(current)) return signr; ptrace_signal_deliver(regs, cookie); @@ -1752,7 +1755,7 @@ static int ptrace_signal(int signr, siginfo_t *info, info->si_errno = 0; info->si_code = SI_USER; info->si_pid = task_pid_vnr(current->parent); - info->si_uid = current->parent->uid; + info->si_uid = task_uid(current->parent); } /* If the (new) signal is now blocked, requeue it. */ @@ -1767,8 +1770,9 @@ static int ptrace_signal(int signr, siginfo_t *info, int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie) { - sigset_t *mask = ¤t->blocked; - int signr = 0; + struct sighand_struct *sighand = current->sighand; + struct signal_struct *signal = current->signal; + int signr; relock: /* @@ -1779,39 +1783,62 @@ relock: */ try_to_freeze(); - spin_lock_irq(¤t->sighand->siglock); - - if (unlikely(current->signal->flags & SIGNAL_CLD_MASK)) { - int why = (current->signal->flags & SIGNAL_STOP_CONTINUED) + spin_lock_irq(&sighand->siglock); + /* + * Every stopped thread goes here after wakeup. Check to see if + * we should notify the parent, prepare_signal(SIGCONT) encodes + * the CLD_ si_code into SIGNAL_CLD_MASK bits. + */ + if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { + int why = (signal->flags & SIGNAL_STOP_CONTINUED) ? 
CLD_CONTINUED : CLD_STOPPED; - current->signal->flags &= ~SIGNAL_CLD_MASK; - spin_unlock_irq(¤t->sighand->siglock); + signal->flags &= ~SIGNAL_CLD_MASK; - read_lock(&tasklist_lock); - do_notify_parent_cldstop(current->group_leader, why); - read_unlock(&tasklist_lock); + why = tracehook_notify_jctl(why, CLD_CONTINUED); + spin_unlock_irq(&sighand->siglock); + + if (why) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current->group_leader, why); + read_unlock(&tasklist_lock); + } goto relock; } for (;;) { struct k_sigaction *ka; - if (unlikely(current->signal->group_stop_count > 0) && + if (unlikely(signal->group_stop_count > 0) && do_signal_stop(0)) goto relock; - signr = dequeue_signal(current, mask, info); - - if (!signr) - break; /* will return 0 */ + /* + * Tracing can induce an artifical signal and choose sigaction. + * The return value in @signr determines the default action, + * but @info->si_signo is the signal number we will report. + */ + signr = tracehook_get_signal(current, regs, info, return_ka); + if (unlikely(signr < 0)) + goto relock; + if (unlikely(signr != 0)) + ka = return_ka; + else { + signr = dequeue_signal(current, ¤t->blocked, + info); - if (signr != SIGKILL) { - signr = ptrace_signal(signr, info, regs, cookie); if (!signr) - continue; + break; /* will return 0 */ + + if (signr != SIGKILL) { + signr = ptrace_signal(signr, info, + regs, cookie); + if (!signr) + continue; + } + + ka = &sighand->action[signr-1]; } - ka = ¤t->sighand->action[signr-1]; if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) { @@ -1832,8 +1859,16 @@ relock: /* * Global init gets no signals it doesn't want. + * Container-init gets no signals it doesn't want from same + * container. + * + * Note that if global/container-init sees a sig_kernel_only() + * signal here, the signal must have been generated internally + * or must have come from an ancestor namespace. In either + * case, the signal cannot be dropped. */ - if (is_global_init(current)) + if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && + !sig_kernel_only(signr)) continue; if (sig_kernel_stop(signr)) { @@ -1848,17 +1883,17 @@ relock: * We need to check for that and bail out if necessary. */ if (signr != SIGSTOP) { - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); /* signals can be posted during this window */ if (is_current_pgrp_orphaned()) goto relock; - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irq(&sighand->siglock); } - if (likely(do_signal_stop(signr))) { + if (likely(do_signal_stop(info->si_signo))) { /* It released the siglock. */ goto relock; } @@ -1870,15 +1905,16 @@ relock: continue; } - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); /* * Anything else is fatal, maybe with a core dump. */ current->flags |= PF_SIGNALED; - if ((signr != SIGKILL) && print_fatal_signals) - print_fatal_signal(regs, signr); + if (sig_kernel_coredump(signr)) { + if (print_fatal_signals) + print_fatal_signal(regs, info->si_signo); /* * If it was able to dump core, this kills all * other threads in the group and synchronizes with @@ -1887,16 +1923,16 @@ relock: * first and our do_group_exit call below will use * that value and ignore the one we pass it. */ - do_coredump((long)signr, signr, regs); + do_coredump(info->si_signo, info->si_signo, regs); } /* * Death signals, no core dump. 
*/ - do_group_exit(signr); + do_group_exit(info->si_signo); /* NOTREACHED */ } - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); return signr; } @@ -1930,14 +1966,14 @@ void exit_signals(struct task_struct *tsk) if (unlikely(tsk->signal->group_stop_count) && !--tsk->signal->group_stop_count) { tsk->signal->flags = SIGNAL_STOP_STOPPED; - group_stop = 1; + group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); } out: spin_unlock_irq(&tsk->sighand->siglock); if (unlikely(group_stop)) { read_lock(&tasklist_lock); - do_notify_parent_cldstop(tsk, CLD_STOPPED); + do_notify_parent_cldstop(tsk, group_stop); read_unlock(&tasklist_lock); } } @@ -1946,8 +1982,6 @@ EXPORT_SYMBOL(recalc_sigpending); EXPORT_SYMBOL_GPL(dequeue_signal); EXPORT_SYMBOL(flush_signals); EXPORT_SYMBOL(force_sig); -EXPORT_SYMBOL(kill_proc); -EXPORT_SYMBOL(ptrace_notify); EXPORT_SYMBOL(send_sig); EXPORT_SYMBOL(send_sig_info); EXPORT_SYMBOL(sigprocmask); @@ -1959,7 +1993,7 @@ EXPORT_SYMBOL(unblock_all_signals); * System call entry points. */ -asmlinkage long sys_restart_syscall(void) +SYSCALL_DEFINE0(restart_syscall) { struct restart_block *restart = ¤t_thread_info()->restart_block; return restart->fn(restart); @@ -2012,8 +2046,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) return error; } -asmlinkage long -sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) +SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, + sigset_t __user *, oset, size_t, sigsetsize) { int error = -EINVAL; sigset_t old_set, new_set; @@ -2072,8 +2106,7 @@ out: return error; } -asmlinkage long -sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize) +SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) { return do_sigpending(set, sigsetsize); } @@ -2144,11 +2177,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) #endif -asmlinkage long -sys_rt_sigtimedwait(const sigset_t __user *uthese, - siginfo_t __user *uinfo, - const struct timespec __user *uts, - size_t sigsetsize) +SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, + siginfo_t __user *, uinfo, const struct timespec __user *, uts, + size_t, sigsetsize) { int ret, sig; sigset_t these; @@ -2221,8 +2252,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, return ret; } -asmlinkage long -sys_kill(int pid, int sig) +SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct siginfo info; @@ -2230,44 +2260,54 @@ sys_kill(int pid, int sig) info.si_errno = 0; info.si_code = SI_USER; info.si_pid = task_tgid_vnr(current); - info.si_uid = current->uid; + info.si_uid = current_uid(); return kill_something_info(sig, &info, pid); } -static int do_tkill(int tgid, int pid, int sig) +static int +do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) { - int error; - struct siginfo info; struct task_struct *p; + int error = -ESRCH; - error = -ESRCH; - info.si_signo = sig; - info.si_errno = 0; - info.si_code = SI_TKILL; - info.si_pid = task_tgid_vnr(current); - info.si_uid = current->uid; - - read_lock(&tasklist_lock); + rcu_read_lock(); p = find_task_by_vpid(pid); if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { - error = check_kill_permission(sig, &info, p); + error = check_kill_permission(sig, info, p); /* * The null signal is a permissions and process existence * probe. No signal is actually delivered. 
*/ - if (!error && sig && p->sighand) { - spin_lock_irq(&p->sighand->siglock); - handle_stop_signal(sig, p); - error = specific_send_sig_info(sig, &info, p); - spin_unlock_irq(&p->sighand->siglock); + if (!error && sig) { + error = do_send_sig_info(sig, info, p, false); + /* + * If lock_task_sighand() failed we pretend the task + * dies after receiving the signal. The window is tiny, + * and the signal is private anyway. + */ + if (unlikely(error == -ESRCH)) + error = 0; } } - read_unlock(&tasklist_lock); + rcu_read_unlock(); return error; } +static int do_tkill(pid_t tgid, pid_t pid, int sig) +{ + struct siginfo info; + + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_TKILL; + info.si_pid = task_tgid_vnr(current); + info.si_uid = current_uid(); + + return do_send_specific(tgid, pid, sig, &info); +} + /** * sys_tgkill - send signal to one specific thread * @tgid: the thread group ID of the thread @@ -2278,7 +2318,7 @@ static int do_tkill(int tgid, int pid, int sig) * exists but it's not belonging to the target process anymore. This * method solves the problem of threads exiting and PIDs getting reused. */ -asmlinkage long sys_tgkill(int tgid, int pid, int sig) +SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) @@ -2290,8 +2330,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig) /* * Send a signal to only one task, even if it's a CLONE_THREAD task. */ -asmlinkage long -sys_tkill(int pid, int sig) +SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0) @@ -2300,8 +2339,8 @@ sys_tkill(int pid, int sig) return do_tkill(0, pid, sig); } -asmlinkage long -sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) +SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, + siginfo_t __user *, uinfo) { siginfo_t info; @@ -2318,6 +2357,32 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) return kill_proc_info(sig, &info, pid); } +long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) +{ + /* This is only valid for single tasks */ + if (pid <= 0 || tgid <= 0) + return -EINVAL; + + /* Not even root can pretend to send signals from the kernel. + Nor can they impersonate a kill(), which adds source info. 
*/ + if (info->si_code >= 0) + return -EPERM; + info->si_signo = sig; + + return do_send_specific(tgid, pid, sig, info); +} + +SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, + siginfo_t __user *, uinfo) +{ + siginfo_t info; + + if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) + return -EFAULT; + + return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); +} + int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) { struct task_struct *t = current; @@ -2348,7 +2413,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) * (for example, SIGCHLD), shall cause the pending signal to * be discarded, whether or not it is blocked" */ - if (__sig_ignored(t, sig)) { + if (sig_handler_ignored(sig_handler(t, sig), sig)) { sigemptyset(&mask); sigaddset(&mask, sig); rm_from_queue_full(&mask, &t->signal->shared_pending); @@ -2369,11 +2434,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s stack_t oss; int error; - if (uoss) { - oss.ss_sp = (void __user *) current->sas_ss_sp; - oss.ss_size = current->sas_ss_size; - oss.ss_flags = sas_ss_flags(sp); - } + oss.ss_sp = (void __user *) current->sas_ss_sp; + oss.ss_size = current->sas_ss_size; + oss.ss_flags = sas_ss_flags(sp); if (uss) { void __user *ss_sp; @@ -2381,10 +2444,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s int ss_flags; error = -EFAULT; - if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) - || __get_user(ss_sp, &uss->ss_sp) - || __get_user(ss_flags, &uss->ss_flags) - || __get_user(ss_size, &uss->ss_size)) + if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) + goto out; + error = __get_user(ss_sp, &uss->ss_sp) | + __get_user(ss_flags, &uss->ss_flags) | + __get_user(ss_size, &uss->ss_size); + if (error) goto out; error = -EPERM; @@ -2416,21 +2481,23 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s current->sas_ss_size = ss_size; } + error = 0; if (uoss) { error = -EFAULT; - if (copy_to_user(uoss, &oss, sizeof(oss))) + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) goto out; + error = __put_user(oss.ss_sp, &uoss->ss_sp) | + __put_user(oss.ss_size, &uoss->ss_size) | + __put_user(oss.ss_flags, &uoss->ss_flags); } - error = 0; out: return error; } #ifdef __ARCH_WANT_SYS_SIGPENDING -asmlinkage long -sys_sigpending(old_sigset_t __user *set) +SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) { return do_sigpending(set, sizeof(*set)); } @@ -2441,8 +2508,8 @@ sys_sigpending(old_sigset_t __user *set) /* Some platforms have their own version with special arguments others support only sys_rt_sigprocmask. */ -asmlinkage long -sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) +SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, + old_sigset_t __user *, oset) { int error; old_sigset_t old_set, new_set; @@ -2492,11 +2559,10 @@ out: #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ #ifdef __ARCH_WANT_SYS_RT_SIGACTION -asmlinkage long -sys_rt_sigaction(int sig, - const struct sigaction __user *act, - struct sigaction __user *oact, - size_t sigsetsize) +SYSCALL_DEFINE4(rt_sigaction, int, sig, + const struct sigaction __user *, act, + struct sigaction __user *, oact, + size_t, sigsetsize) { struct k_sigaction new_sa, old_sa; int ret = -EINVAL; @@ -2526,15 +2592,13 @@ out: /* * For backwards compatibility. Functionality superseded by sigprocmask. 
*/ -asmlinkage long -sys_sgetmask(void) +SYSCALL_DEFINE0(sgetmask) { /* SMP safe */ return current->blocked.sig[0]; } -asmlinkage long -sys_ssetmask(int newmask) +SYSCALL_DEFINE1(ssetmask, int, newmask) { int old; @@ -2554,8 +2618,7 @@ sys_ssetmask(int newmask) /* * For backwards compatibility. Functionality superseded by sigaction. */ -asmlinkage unsigned long -sys_signal(int sig, __sighandler_t handler) +SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) { struct k_sigaction new_sa, old_sa; int ret; @@ -2572,8 +2635,7 @@ sys_signal(int sig, __sighandler_t handler) #ifdef __ARCH_WANT_SYS_PAUSE -asmlinkage long -sys_pause(void) +SYSCALL_DEFINE0(pause) { current->state = TASK_INTERRUPTIBLE; schedule(); @@ -2583,7 +2645,7 @@ sys_pause(void) #endif #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) +SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) { sigset_t newset; @@ -2603,7 +2665,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) current->state = TASK_INTERRUPTIBLE; schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); + set_restore_sigmask(); return -ERESTARTNOHAND; } #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */