X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fsignal.c;h=8072e568bbe0c41e68cffc8efa291488b4dcf0e7;hb=da7071d7e32d15149cc513f096a3638097b66387;hp=c5b65aa4c2bccfdcc82f919422f4b7589c2015f9;hpb=a9e88e84b5245da0a1dadb6ccca70ae84e93ccf6;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/signal.c b/kernel/signal.c
index c5b65aa..8072e56 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -10,7 +10,6 @@
  *		to allow signals to be sent reliably.
  */
 
-#include
 #include
 #include
 #include
@@ -22,20 +21,23 @@
 #include
 #include
 #include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
+
 #include
 #include
 #include
 #include
+#include "audit.h"	/* audit_signal_info() */
 
 /*
  * SLAB caches for signal bits.
  */
 
-static kmem_cache_t *sigqueue_cachep;
+static struct kmem_cache *sigqueue_cachep;
 
 /*
  * In POSIX a signal is sent either to a specific thread (Linux task)
@@ -147,8 +149,7 @@ static kmem_cache_t *sigqueue_cachep;
 #define sig_kernel_stop(sig) \
 	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
 
-#define sig_needs_tasklist(sig) \
-	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))
+#define sig_needs_tasklist(sig)	((sig) == SIGCONT)
 
 #define sig_user_defined(t, signr) \
 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
@@ -270,18 +271,25 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
 					 int override_rlimit)
 {
 	struct sigqueue *q = NULL;
+	struct user_struct *user;
 
-	atomic_inc(&t->user->sigpending);
+	/*
+	 * In order to avoid problems with "switch_user()", we want to make
+	 * sure that the compiler doesn't re-load "t->user"
+	 */
+	user = t->user;
+	barrier();
+	atomic_inc(&user->sigpending);
 	if (override_rlimit ||
-	    atomic_read(&t->user->sigpending) <=
+	    atomic_read(&user->sigpending) <=
 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, flags);
 	if (unlikely(q == NULL)) {
-		atomic_dec(&t->user->sigpending);
+		atomic_dec(&user->sigpending);
 	} else {
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
-		q->user = get_uid(t->user);
+		q->user = get_uid(user);
 	}
 	return(q);
 }
@@ -295,7 +303,7 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
 
@@ -310,9 +318,7 @@ static void flush_sigqueue(struct sigpending *queue)
 
 /*
  * Flush all pending signals for a task.
  */
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
 {
 	unsigned long flags;
 
@@ -324,109 +330,6 @@ flush_signals(struct task_struct *t)
 }
 
 /*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
-	struct sighand_struct * sighand = tsk->sighand;
-
-	/* Ok, we're done with the signal handlers */
-	tsk->sighand = NULL;
-	if (atomic_dec_and_test(&sighand->count))
-		kmem_cache_free(sighand_cachep, sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
-	write_lock_irq(&tasklist_lock);
-	rcu_read_lock();
-	if (tsk->sighand != NULL) {
-		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
-		spin_lock(&sighand->siglock);
-		__exit_sighand(tsk);
-		spin_unlock(&sighand->siglock);
-	}
-	rcu_read_unlock();
-	write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct * sig = tsk->signal;
-	struct sighand_struct * sighand;
-
-	if (!sig)
-		BUG();
-	if (!atomic_read(&sig->count))
-		BUG();
-	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	spin_lock(&sighand->siglock);
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count)) {
-		posix_cpu_timers_exit_group(tsk);
-		tsk->signal = NULL;
-		__exit_sighand(tsk);
-		spin_unlock(&sighand->siglock);
-		flush_sigqueue(&sig->shared_pending);
-	} else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		tsk->signal = NULL;
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads. When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		__exit_sighand(tsk);
-		spin_unlock(&sighand->siglock);
-		sig = NULL;	/* Marker for below. */
-	}
-	rcu_read_unlock();
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		/*
-		 * We are cleaning up the signal_struct here.
-		 */
-		exit_thread_group_keys(sig);
-		kmem_cache_free(signal_cachep, sig);
-	}
-}
-
-void exit_signal(struct task_struct *tsk)
-{
-	atomic_dec(&tsk->signal->live);
-
-	write_lock_irq(&tasklist_lock);
-	__exit_signal(tsk);
-	write_unlock_irq(&tasklist_lock);
-}
-
-/*
  * Flush all handlers for a task.
  */
 
@@ -525,9 +428,8 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 			siginfo_t *info)
 {
-	int sig = 0;
+	int sig = next_signal(pending, mask);
 
-	sig = next_signal(pending, mask);
 	if (sig) {
 		if (current->notifier) {
 			if (sigismember(current->notifier_mask, sig)) {
@@ -540,9 +442,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 		if (!collect_signal(sig, pending, info))
 			sig = 0;
-
 	}
-	recalc_sigpending();
 
 	return sig;
 }
 
@@ -559,6 +459,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 	if (!signr)
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
 					 mask, info);
+	recalc_sigpending_tsk(tsk);
 	if (signr && unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal. Our
@@ -685,22 +586,20 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	error = -EPERM;
 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
 	    && ((sig != SIGCONT) ||
-		(current->signal->session != t->signal->session))
+		(process_session(current) != process_session(t)))
 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
 	    && !capable(CAP_KILL))
 		return error;
 
-	error = security_task_kill(t, info, sig);
+	error = security_task_kill(t, info, sig, 0);
 	if (!error)
 		audit_signal_info(sig, t); /* Let audit system see the signal */
 	return error;
 }
 
 /* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-				     int to_self,
-				     int why);
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
  * Handle magic process-wide effects of stop/continue signals.
@@ -750,7 +649,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->group_stop_count = 0;
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
+			do_notify_parent_cldstop(p, CLD_STOPPED);
 			spin_lock(&p->sighand->siglock);
 		}
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -791,7 +690,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			p->signal->group_exit_code = 0;
 			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
+			do_notify_parent_cldstop(p, CLD_CONTINUED);
 			spin_lock(&p->sighand->siglock);
 		} else {
 			/*
@@ -878,8 +777,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	int ret = 0;
 
-	if (!irqs_disabled())
-		BUG();
+	BUG_ON(!irqs_disabled());
 	assert_spin_locked(&t->sighand->siglock);
 
 	/* Short-circuit ignored signals. */
@@ -902,22 +800,31 @@ out:
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
+ *
+ * Note: If we unblock the signal, we always reset it to SIG_DFL,
+ * since we do not want to have a signal handler that was blocked
+ * be invoked when user space had explicitly blocked it.
+ *
+ * We don't want to have recursive SIGSEGV's etc, for example.
  */
-
 int
 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	unsigned long int flags;
-	int ret;
+	int ret, blocked, ignored;
+	struct k_sigaction *action;
 
 	spin_lock_irqsave(&t->sighand->siglock, flags);
-	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
-		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-	}
-	if (sigismember(&t->blocked, sig)) {
-		sigdelset(&t->blocked, sig);
+	action = &t->sighand->action[sig-1];
+	ignored = action->sa.sa_handler == SIG_IGN;
+	blocked = sigismember(&t->blocked, sig);
+	if (blocked || ignored) {
+		action->sa.sa_handler = SIG_DFL;
+		if (blocked) {
+			sigdelset(&t->blocked, sig);
+			recalc_sigpending_tsk(t);
+		}
 	}
-	recalc_sigpending_tsk(t);
 	ret = specific_send_sig_info(sig, info, t);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -978,7 +885,6 @@ __group_complete_signal(int sig, struct task_struct *p)
 		if (t == NULL)
 			/* restart balancing at this thread */
 			t = p->signal->curr_target = p;
 
-		BUG_ON(t->tgid != p->tgid);
 		while (!wants_signal(sig, t)) {
 			t = next_thread(t);
@@ -1160,65 +1066,68 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 }
 
 /*
- * kill_pg_info() sends a signal to a process group: this is what the tty
+ * kill_pgrp_info() sends a signal to a process group: this is what the tty
  * control characters do (^C, ^Z etc)
  */
 
-int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
 {
 	struct task_struct *p = NULL;
 	int retval, success;
 
-	if (pgrp <= 0)
-		return -EINVAL;
-
 	success = 0;
 	retval = -ESRCH;
-	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
+	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
 		int err = group_send_sig_info(sig, info, p);
 		success |= !err;
 		retval = err;
-	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
+	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
 	return success ? 0 : retval;
 }
 
-int
-kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
 {
 	int retval;
 
 	read_lock(&tasklist_lock);
-	retval = __kill_pg_info(sig, info, pgrp);
+	retval = __kill_pgrp_info(sig, info, pgrp);
 	read_unlock(&tasklist_lock);
 
 	return retval;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 {
 	int error;
-	int acquired_tasklist_lock = 0;
 	struct task_struct *p;
 
 	rcu_read_lock();
-	if (unlikely(sig_needs_tasklist(sig))) {
+	if (unlikely(sig_needs_tasklist(sig)))
 		read_lock(&tasklist_lock);
-		acquired_tasklist_lock = 1;
-	}
-	p = find_task_by_pid(pid);
+
+	p = pid_task(pid, PIDTYPE_PID);
 	error = -ESRCH;
 	if (p)
 		error = group_send_sig_info(sig, info, p);
-	if (unlikely(acquired_tasklist_lock))
+
+	if (unlikely(sig_needs_tasklist(sig)))
 		read_unlock(&tasklist_lock);
 	rcu_read_unlock();
 	return error;
 }
 
-/* like kill_proc_info(), but doesn't use uid/euid of "current" */
-int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
-		      uid_t uid, uid_t euid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+{
+	int error;
+	rcu_read_lock();
+	error = kill_pid_info(sig, info, find_pid(pid));
+	rcu_read_unlock();
+	return error;
+}
+
+/* like kill_pid_info(), but doesn't use uid/euid of "current" */
+int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
+		      uid_t uid, uid_t euid, u32 secid)
 {
 	int ret = -EINVAL;
 	struct task_struct *p;
@@ -1227,7 +1136,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
 		return ret;
 
 	read_lock(&tasklist_lock);
-	p = find_task_by_pid(pid);
+	p = pid_task(pid, PIDTYPE_PID);
 	if (!p) {
 		ret = -ESRCH;
 		goto out_unlock;
@@ -1238,6 +1147,9 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
 		ret = -EPERM;
 		goto out_unlock;
 	}
+	ret = security_task_kill(p, info, sig, secid);
+	if (ret)
+		goto out_unlock;
 	if (sig && p->sighand) {
 		unsigned long flags;
 		spin_lock_irqsave(&p->sighand->siglock, flags);
@@ -1248,7 +1160,7 @@ out_unlock:
 	read_unlock(&tasklist_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
+EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1259,8 +1171,10 @@ EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
 
 static int kill_something_info(int sig, struct siginfo *info, int pid)
 {
+	int ret;
+
+	rcu_read_lock();
 	if (!pid) {
-		return kill_pg_info(sig, info, process_group(current));
+		ret = kill_pgrp_info(sig, info, task_pgrp(current));
 	} else if (pid == -1) {
 		int retval = 0, count = 0;
 		struct task_struct * p;
@@ -1275,12 +1189,14 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
 			}
 		}
 		read_unlock(&tasklist_lock);
-		return count ? retval : -ESRCH;
+		ret = count ? retval : -ESRCH;
 	} else if (pid < 0) {
-		return kill_pg_info(sig, info, -pid);
+		ret = kill_pgrp_info(sig, info, find_pid(-pid));
 	} else {
-		return kill_proc_info(sig, info, pid);
+		ret = kill_pid_info(sig, info, find_pid(pid));
 	}
+	rcu_read_unlock();
+	return ret;
 }
 
 /*
@@ -1366,11 +1282,17 @@ force_sigsegv(int sig, struct task_struct *p)
 	return 0;
 }
 
-int
-kill_pg(pid_t pgrp, int sig, int priv)
+int kill_pgrp(struct pid *pid, int sig, int priv)
 {
-	return kill_pg_info(sig, __si_special(priv), pgrp);
+	return kill_pgrp_info(sig, __si_special(priv), pid);
 }
+EXPORT_SYMBOL(kill_pgrp);
+
+int kill_pid(struct pid *pid, int sig, int priv)
+{
+	return kill_pid_info(sig, __si_special(priv), pid);
+}
+EXPORT_SYMBOL(kill_pid);
 
 int
 kill_proc(pid_t pid, int sig, int priv)
@@ -1418,12 +1340,10 @@ void sigqueue_free(struct sigqueue *q)
 	__sigqueue_free(q);
 }
 
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret = 0;
-	struct sighand_struct *sh;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1437,48 +1357,17 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	 */
 	rcu_read_lock();
 
-	if (unlikely(p->flags & PF_EXITING)) {
+	if (!likely(lock_task_sighand(p, &flags))) {
 		ret = -1;
 		goto out_err;
 	}
 
-retry:
-	sh = rcu_dereference(p->sighand);
-
-	spin_lock_irqsave(&sh->siglock, flags);
-	if (p->sighand != sh) {
-		/* We raced with exec() in a multithreaded process... */
-		spin_unlock_irqrestore(&sh->siglock, flags);
-		goto retry;
-	}
-
-	/*
-	 * We do the check here again to handle the following scenario:
-	 *
-	 * CPU 0		CPU 1
-	 * send_sigqueue
-	 * check PF_EXITING
-	 *			interrupt exit code running
-	 *			__exit_signal
-	 *			lock sighand->siglock
-	 *			unlock sighand->siglock
-	 * lock sh->siglock
-	 * add(tsk->pending)	flush_sigqueue(tsk->pending)
-	 *
-	 */
-
-	if (unlikely(p->flags & PF_EXITING)) {
-		ret = -1;
-		goto out;
-	}
-
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
 		 * the overrun count.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	}
@@ -1494,7 +1383,7 @@ retry:
 	signal_wake_up(p, sig == SIGKILL);
 
 out:
-	spin_unlock_irqrestore(&sh->siglock, flags);
+	unlock_task_sighand(p, &flags);
 out_err:
 	rcu_read_unlock();
 
@@ -1526,8 +1415,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 		 * the overrun count. Other uses should not try to
 		 * send the signal multiple times.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	}
@@ -1626,14 +1514,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
 	struct siginfo info;
 	unsigned long flags;
 	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
-	if (to_self)
+	if (tsk->ptrace & PT_PTRACED)
 		parent = tsk->parent;
 	else {
 		tsk = tsk->group_leader;
@@ -1676,6 +1564,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int w
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
+static inline int may_ptrace_stop(void)
+{
+	if (!likely(current->ptrace & PT_PTRACED))
+		return 0;
+
+	if (unlikely(current->parent == current->real_parent &&
+		     (current->ptrace & PT_ATTACHED)))
+		return 0;
+
+	if (unlikely(current->signal == current->parent->signal) &&
+	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+		return 0;
+
+	/*
+	 * Are we in the middle of do_coredump?
+	 * If so and our tracer is also part of the coredump stopping
+	 * is a deadlock situation, and pointless because our tracer
+	 * is dead so don't allow us to stop.
+	 * If SIGKILL was already sent before the caller unlocked
+	 * ->siglock we must see ->core_waiters != 0. Otherwise it
+	 * is safe to enter schedule().
+	 */
+	if (unlikely(current->mm->core_waiters) &&
+	    unlikely(current->mm == current->parent->mm))
+		return 0;
+
+	return 1;
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1702,13 +1619,10 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	/* Let the debugger run. */
 	set_current_state(TASK_TRACED);
 	spin_unlock_irq(&current->sighand->siglock);
+	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (likely(current->ptrace & PT_PTRACED) &&
-	    likely(current->parent != current->real_parent ||
-		   !(current->ptrace & PT_ATTACHED)) &&
-	    (likely(current->parent->signal != current->signal) ||
-	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
+	if (may_ptrace_stop()) {
+		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
 	} else {
@@ -1757,26 +1671,20 @@ void ptrace_notify(int exit_code)
 
 static void finish_stop(int stop_count)
 {
-	int to_self;
-
 	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent. When ptraced, every thread reports itself.
	 */
-	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
-		to_self = 1;
-	else if (stop_count == 0)
-		to_self = 0;
-	else
-		goto out;
-
-	read_lock(&tasklist_lock);
-	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
-	read_unlock(&tasklist_lock);
+	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(current, CLD_STOPPED);
+		read_unlock(&tasklist_lock);
+	}
 
-out:
-	schedule();
+	do {
+		schedule();
+	} while (try_to_freeze());
 	/*
 	 * Now we don't run again until continued.
 	 */
@@ -1789,12 +1697,10 @@ out:
  * Returns nonzero if we've actually stopped and released the siglock.
  * Returns zero if we didn't stop and still hold the siglock.
  */
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	struct sighand_struct *sighand = current->sighand;
-	int stop_count = -1;
+	int stop_count;
 
 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
 		return 0;
@@ -1804,86 +1710,37 @@ do_signal_stop(int signr)
 		 * There is a group stop in progress. We don't need to
 		 * start another one.
 		 */
-		signr = sig->group_exit_code;
 		stop_count = --sig->group_stop_count;
-		current->exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		if (stop_count == 0)
-			sig->flags = SIGNAL_STOP_STOPPED;
-		spin_unlock_irq(&sighand->siglock);
-	}
-	else if (thread_group_empty(current)) {
-		/*
-		 * Lock must be held through transition to stopped state.
-		 */
-		current->exit_code = current->signal->group_exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		sig->flags = SIGNAL_STOP_STOPPED;
-		spin_unlock_irq(&sighand->siglock);
-	}
-	else {
+	} else {
 		/*
 		 * There is no group stop already in progress.
-		 * We must initiate one now, but that requires
-		 * dropping siglock to get both the tasklist lock
-		 * and siglock again in the proper order. Note that
-		 * this allows an intervening SIGCONT to be posted.
-		 * We need to check for that and bail out if necessary.
+		 * We must initiate one now.
 		 */
 		struct task_struct *t;
 
-		spin_unlock_irq(&sighand->siglock);
+		sig->group_exit_code = signr;
 
-		/* signals can be posted during this window */
-
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&sighand->siglock);
-
-		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+		stop_count = 0;
+		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
-			 * Another stop or continue happened while we
-			 * didn't have the lock. We can just swallow this
-			 * signal now. If we raced with a SIGCONT, that
-			 * should have just cleared it now. If we raced
-			 * with another processor delivering a stop signal,
-			 * then the SIGCONT that wakes us up should clear it.
+			 * Setting state to TASK_STOPPED for a group
+			 * stop is always done with the siglock held,
+			 * so this check has no races.
 			 */
-			read_unlock(&tasklist_lock);
-			return 0;
-		}
-
-		if (sig->group_stop_count == 0) {
-			sig->group_exit_code = signr;
-			stop_count = 0;
-			for (t = next_thread(current); t != current;
-			     t = next_thread(t))
-				/*
-				 * Setting state to TASK_STOPPED for a group
-				 * stop is always done with the siglock held,
-				 * so this check has no races.
-				 */
-				if (!t->exit_state &&
-				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
-					stop_count++;
-					signal_wake_up(t, 0);
-				}
-			sig->group_stop_count = stop_count;
-		}
-		else {
-			/* A race with another thread while unlocked. */
-			signr = sig->group_exit_code;
-			stop_count = --sig->group_stop_count;
-		}
-
-		current->exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		if (stop_count == 0)
-			sig->flags = SIGNAL_STOP_STOPPED;
-
-		spin_unlock_irq(&sighand->siglock);
-		read_unlock(&tasklist_lock);
+			if (!t->exit_state &&
+			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+				stop_count++;
+				signal_wake_up(t, 0);
+			}
+		sig->group_stop_count = stop_count;
 	}
+	if (stop_count == 0)
+		sig->flags = SIGNAL_STOP_STOPPED;
+	current->exit_code = sig->group_exit_code;
+	__set_current_state(TASK_STOPPED);
+
+	spin_unlock_irq(&current->sighand->siglock);
 	finish_stop(stop_count);
 	return 1;
 }
@@ -1957,9 +1814,9 @@ relock:
 			/* Let the debugger run. */
 			ptrace_stop(signr, signr, info);
 
-			/* We're back. Did the debugger cancel the sig or group_exit? */
+			/* We're back. Did the debugger cancel the sig? */
 			signr = current->exit_code;
-			if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
+			if (signr == 0)
 				continue;
 			current->exit_code = 0;
 
@@ -2002,8 +1859,12 @@ relock:
 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
 			continue;
 
-		/* Init gets no signals it doesn't want. */
-		if (current == child_reaper)
+		/*
+		 * Init of a pid space gets no signals it doesn't want from
+		 * within that pid space. It can of course get signals from
+		 * its parent pid space.
+		 */
+		if (current == child_reaper(current))
 			continue;
 
 		if (sig_kernel_stop(signr)) {
@@ -2022,7 +1883,7 @@ relock:
 
 			/* signals can be posted during this window */
 
-			if (is_orphaned_pgrp(process_group(current)))
+			if (is_current_pgrp_orphaned())
 				goto relock;
 
 			spin_lock_irq(&current->sighand->siglock);
@@ -2072,7 +1933,6 @@ EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(kill_pg);
 EXPORT_SYMBOL(kill_proc);
 EXPORT_SYMBOL(ptrace_notify);
 EXPORT_SYMBOL(send_sig);
@@ -2399,7 +2259,7 @@ static int do_tkill(int tgid, int pid, int sig)
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
- * This syscall also checks the tgid and returns -ESRCH even if the PID
+ * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but it's not belonging to the target process anymore. This
 * method solves the problem of threads exiting and PIDs getting reused.
 */
@@ -2443,8 +2303,7 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 	return kill_proc_info(sig, &info, pid);
 }
 
-int
-do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
 	struct k_sigaction *k;
 	sigset_t mask;
@@ -2470,6 +2329,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 	if (act) {
 		sigdelsetmask(&act->sa.sa_mask,
 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
+		*k = *act;
 		/*
 		 * POSIX 3.3.1.3:
 		 *  "Setting a signal action to SIG_IGN for a signal that is
@@ -2482,19 +2342,8 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 		 *   be discarded, whether or not it is blocked"
 		 */
 		if (act->sa.sa_handler == SIG_IGN ||
-		    (act->sa.sa_handler == SIG_DFL &&
-		     sig_kernel_ignore(sig))) {
-			/*
-			 * This is a fairly rare case, so we only take the
-			 * tasklist_lock once we're sure we'll need it.
-			 * Now we must do this little unlock and relock
-			 * dance to maintain the lock hierarchy.
-			 */
+		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
 			struct task_struct *t = current;
-			spin_unlock_irq(&t->sighand->siglock);
-			read_lock(&tasklist_lock);
-			spin_lock_irq(&t->sighand->siglock);
-			*k = *act;
 			sigemptyset(&mask);
 			sigaddset(&mask, sig);
 			rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2503,12 +2352,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 				recalc_sigpending_tsk(t);
 				t = next_thread(t);
 			} while (t != current);
-			spin_unlock_irq(&current->sighand->siglock);
-			read_unlock(&tasklist_lock);
-			return 0;
 		}
-
-		*k = *act;
 	}
 
 	spin_unlock_irq(&current->sighand->siglock);
@@ -2760,6 +2604,11 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
 }
 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
 
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return NULL;
+}
+
 void __init signals_init(void)
 {
 	sigqueue_cachep =