* to allow signals to be sent reliably.
*/
-#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
-#include <linux/posix-timers.h>
#include <linux/signal.h>
-#include <linux/audit.h>
#include <linux/capability.h>
+#include <linux/freezer.h>
+#include <linux/pid_namespace.h>
+#include <linux/nsproxy.h>
+
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
+#include "audit.h" /* audit_signal_info() */
/*
* SLAB caches for signal bits.
*/
-static kmem_cache_t *sigqueue_cachep;
+static struct kmem_cache *sigqueue_cachep;
/*
* In POSIX a signal is sent either to a specific thread (Linux task)
#define sig_kernel_stop(sig) \
(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
+#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
+
#define sig_user_defined(t, signr) \
(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
int override_rlimit)
{
struct sigqueue *q = NULL;
+ struct user_struct *user;
- atomic_inc(&t->user->sigpending);
+ /*
+ * In order to avoid problems with "switch_user()", we want to make
+ * sure that the compiler doesn't re-load "t->user"
+ */
+ user = t->user;
+ barrier();
+ atomic_inc(&user->sigpending);
if (override_rlimit ||
- atomic_read(&t->user->sigpending) <=
+ atomic_read(&user->sigpending) <=
t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, flags);
if (unlikely(q == NULL)) {
- atomic_dec(&t->user->sigpending);
+ atomic_dec(&user->sigpending);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = 0;
- q->user = get_uid(t->user);
+ q->user = get_uid(user);
}
return(q);
}
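
The user_struct snapshot plus barrier() above exists because the paired atomic_inc()/atomic_dec() on ->sigpending must hit the same object. A minimal sketch of the reload hazard being closed, assuming a concurrent switch_user() can reassign t->user:

	/* Without the snapshot, the compiler may legally reload t->user: */
	atomic_inc(&t->user->sigpending);	/* load #1 of t->user */
	/* ... switch_user() on another CPU swaps t->user here ... */
	atomic_dec(&t->user->sigpending);	/* load #2 hits a different
						 * user_struct: the pending
						 * count leaks */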
kmem_cache_free(sigqueue_cachep, q);
}
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
/*
* Flush all pending signals for a task.
*/
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
{
unsigned long flags;
}
/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
- struct sighand_struct * sighand = tsk->sighand;
-
- /* Ok, we're done with the signal handlers */
- tsk->sighand = NULL;
- if (atomic_dec_and_test(&sighand->count))
- kmem_cache_free(sighand_cachep, sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
- write_lock_irq(&tasklist_lock);
- rcu_read_lock();
- if (tsk->sighand != NULL) {
- struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
- spin_lock(&sighand->siglock);
- __exit_sighand(tsk);
- spin_unlock(&sighand->siglock);
- }
- rcu_read_unlock();
- write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
- struct signal_struct * sig = tsk->signal;
- struct sighand_struct * sighand;
-
- if (!sig)
- BUG();
- if (!atomic_read(&sig->count))
- BUG();
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- spin_lock(&sighand->siglock);
- posix_cpu_timers_exit(tsk);
- if (atomic_dec_and_test(&sig->count)) {
- posix_cpu_timers_exit_group(tsk);
- tsk->signal = NULL;
- __exit_sighand(tsk);
- spin_unlock(&sighand->siglock);
- flush_sigqueue(&sig->shared_pending);
- } else {
- /*
- * If there is any task waiting for the group exit
- * then notify it:
- */
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
- wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
- if (tsk == sig->curr_target)
- sig->curr_target = next_thread(tsk);
- tsk->signal = NULL;
- /*
- * Accumulate here the counters for all threads but the
- * group leader as they die, so they can be added into
- * the process-wide totals when those are taken.
- * The group leader stays around as a zombie as long
- * as there are other threads. When it gets reaped,
- * the exit.c code will add its counts into these totals.
- * We won't ever get here for the group leader, since it
- * will have been the last reference on the signal_struct.
- */
- sig->utime = cputime_add(sig->utime, tsk->utime);
- sig->stime = cputime_add(sig->stime, tsk->stime);
- sig->min_flt += tsk->min_flt;
- sig->maj_flt += tsk->maj_flt;
- sig->nvcsw += tsk->nvcsw;
- sig->nivcsw += tsk->nivcsw;
- sig->sched_time += tsk->sched_time;
- __exit_sighand(tsk);
- spin_unlock(&sighand->siglock);
- sig = NULL; /* Marker for below. */
- }
- rcu_read_unlock();
- clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
- flush_sigqueue(&tsk->pending);
- if (sig) {
- /*
- * We are cleaning up the signal_struct here.
- */
- exit_thread_group_keys(sig);
- kmem_cache_free(signal_cachep, sig);
- }
-}
-
-void exit_signal(struct task_struct *tsk)
-{
- atomic_dec(&tsk->signal->live);
-
- write_lock_irq(&tasklist_lock);
- __exit_signal(tsk);
- write_unlock_irq(&tasklist_lock);
-}
-
-/*
* Flush all handlers for a task.
*/
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
siginfo_t *info)
{
- int sig = 0;
+	int sig = next_signal(pending, mask);

-	sig = next_signal(pending, mask);
if (sig) {
if (current->notifier) {
if (sigismember(current->notifier_mask, sig)) {
if (!collect_signal(sig, pending, info))
sig = 0;
-
}
- recalc_sigpending();
return sig;
}
if (!signr)
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info);
+ recalc_sigpending_tsk(tsk);
if (signr && unlikely(sig_kernel_stop(signr))) {
/*
* Set a marker that we have dequeued a stop signal. Our
error = -EPERM;
if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
&& ((sig != SIGCONT) ||
- (current->signal->session != t->signal->session))
+ (process_session(current) != process_session(t)))
&& (current->euid ^ t->suid) && (current->euid ^ t->uid)
&& (current->uid ^ t->suid) && (current->uid ^ t->uid)
&& !capable(CAP_KILL))
return error;
- error = security_task_kill(t, info, sig);
+ error = security_task_kill(t, info, sig, 0);
if (!error)
audit_signal_info(sig, t); /* Let audit system see the signal */
return error;
}
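
Since x ^ y is nonzero exactly when x != y, the terse XOR chain in the uid test above is equivalent to this spelled-out sketch:

	if (current->euid != t->suid && current->euid != t->uid &&
	    current->uid  != t->suid && current->uid  != t->uid &&
	    !capable(CAP_KILL))
		return -EPERM;	/* no uid pair matches and no CAP_KILL */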
/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
- int to_self,
- int why);
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
* Handle magic process-wide effects of stop/continue signals.
p->signal->group_stop_count = 0;
p->signal->flags = SIGNAL_STOP_CONTINUED;
spin_unlock(&p->sighand->siglock);
- do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
+ do_notify_parent_cldstop(p, CLD_STOPPED);
spin_lock(&p->sighand->siglock);
}
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
p->signal->flags = SIGNAL_STOP_CONTINUED;
p->signal->group_exit_code = 0;
spin_unlock(&p->sighand->siglock);
- do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
+ do_notify_parent_cldstop(p, CLD_CONTINUED);
spin_lock(&p->sighand->siglock);
} else {
/*
{
int ret = 0;
- if (!irqs_disabled())
- BUG();
+ BUG_ON(!irqs_disabled());
assert_spin_locked(&t->sighand->siglock);
/* Short-circuit ignored signals. */
/*
* Force a signal that the process can't ignore: if necessary
* we unblock the signal and change any SIG_IGN to SIG_DFL.
+ *
+ * Note: If we unblock the signal, we always reset it to SIG_DFL,
+ * since we do not want a handler for a blocked signal to be
+ * invoked when user space had explicitly blocked it.
+ *
+ * We don't want to have recursive SIGSEGV's etc, for example.
*/
-
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
unsigned long int flags;
- int ret;
+ int ret, blocked, ignored;
+ struct k_sigaction *action;
spin_lock_irqsave(&t->sighand->siglock, flags);
- if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
- t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
- }
- if (sigismember(&t->blocked, sig)) {
- sigdelset(&t->blocked, sig);
+ action = &t->sighand->action[sig-1];
+ ignored = action->sa.sa_handler == SIG_IGN;
+ blocked = sigismember(&t->blocked, sig);
+ if (blocked || ignored) {
+ action->sa.sa_handler = SIG_DFL;
+ if (blocked) {
+ sigdelset(&t->blocked, sig);
+ recalc_sigpending_tsk(t);
+ }
}
- recalc_sigpending_tsk(t);
ret = specific_send_sig_info(sig, info, t);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
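
A typical force_sig_info() caller is an architecture fault handler delivering a synchronous signal to the current task. Roughly, with fault_address as an illustrative local (arch code varies):

	struct siginfo info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_MAPERR;
	info.si_addr  = (void __user *)fault_address;
	force_sig_info(SIGSEGV, &info, current);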
if (t == NULL)
/* restart balancing at this thread */
t = p->signal->curr_target = p;
- BUG_ON(t->tgid != p->tgid);
while (!wants_signal(sig, t)) {
t = next_thread(t);
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+ struct sighand_struct *sighand;
+
+ for (;;) {
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL))
+ break;
+
+ spin_lock_irqsave(&sighand->siglock, *flags);
+ if (likely(sighand == tsk->sighand))
+ break;
+ spin_unlock_irqrestore(&sighand->siglock, *flags);
+ }
+
+ return sighand;
+}
+
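
lock_task_sighand() loops because exec() in another thread can swap tsk->sighand between the rcu_dereference() and the spin_lock; re-checking under the lock makes the pair stable. The matching helper used by the callers below is essentially this static inline (from sched.h in this era):

	static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
	{
		spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
	}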
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
unsigned long flags;
- struct sighand_struct *sp;
int ret;
-retry:
ret = check_kill_permission(sig, info, p);
- if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
- spin_lock_irqsave(&sp->siglock, flags);
- if (p->sighand != sp) {
- spin_unlock_irqrestore(&sp->siglock, flags);
- goto retry;
- }
- if ((atomic_read(&sp->count) == 0) ||
- (atomic_read(&p->usage) == 0)) {
- spin_unlock_irqrestore(&sp->siglock, flags);
- return -ESRCH;
+
+ if (!ret && sig) {
+ ret = -ESRCH;
+ if (lock_task_sighand(p, &flags)) {
+ ret = __group_send_sig_info(sig, info, p);
+ unlock_task_sighand(p, &flags);
}
- ret = __group_send_sig_info(sig, info, p);
- spin_unlock_irqrestore(&sp->siglock, flags);
}
return ret;
}
/*
- * kill_pg_info() sends a signal to a process group: this is what the tty
+ * kill_pgrp_info() sends a signal to a process group: this is what the tty
* control characters do (^C, ^Z etc)
*/
-int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
struct task_struct *p = NULL;
int retval, success;
- if (pgrp <= 0)
- return -EINVAL;
-
success = 0;
retval = -ESRCH;
- do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
+ do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
int err = group_send_sig_info(sig, info, p);
success |= !err;
retval = err;
- } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
+ } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return success ? 0 : retval;
}
+int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
+{
+ int retval;
+
+ read_lock(&tasklist_lock);
+ retval = __kill_pgrp_info(sig, info, pgrp);
+ read_unlock(&tasklist_lock);
+
+ return retval;
+}
+
+int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+{
+ if (pgrp <= 0)
+ return -EINVAL;
+
+ return __kill_pgrp_info(sig, info, find_pid(pgrp));
+}
+
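
The pid_t entry points survive only as thin wrappers while callers migrate to struct pid. A typical call-site conversion, assuming the task_pgrp() accessor from the same struct-pid series, would look like:

	/* before: numeric pgid, racy once the pid number is recycled */
	kill_pg(process_group(p), SIGHUP, 1);

	/* after: the struct pid is reference-counted, no recycle race */
	kill_pgrp(task_pgrp(p), SIGHUP, 1);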
int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
return retval;
}
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
int error;
int acquired_tasklist_lock = 0;
struct task_struct *p;
rcu_read_lock();
- if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+ if (unlikely(sig_needs_tasklist(sig))) {
read_lock(&tasklist_lock);
acquired_tasklist_lock = 1;
}
- p = find_task_by_pid(pid);
+ p = pid_task(pid, PIDTYPE_PID);
error = -ESRCH;
if (p)
error = group_send_sig_info(sig, info, p);
return error;
}
-/* like kill_proc_info(), but doesn't use uid/euid of "current" */
-int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
- uid_t uid, uid_t euid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+{
+ int error;
+ rcu_read_lock();
+ error = kill_pid_info(sig, info, find_pid(pid));
+ rcu_read_unlock();
+ return error;
+}
+
+/* like kill_pid_info(), but doesn't use uid/euid of "current" */
+int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
+ uid_t uid, uid_t euid, u32 secid)
{
int ret = -EINVAL;
struct task_struct *p;
return ret;
read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
+ p = pid_task(pid, PIDTYPE_PID);
if (!p) {
ret = -ESRCH;
goto out_unlock;
ret = -EPERM;
goto out_unlock;
}
+ ret = security_task_kill(p, info, sig, secid);
+ if (ret)
+ goto out_unlock;
if (sig && p->sighand) {
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
read_unlock(&tasklist_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
+EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
return 0;
}
+int kill_pgrp(struct pid *pid, int sig, int priv)
+{
+ return kill_pgrp_info(sig, __si_special(priv), pid);
+}
+EXPORT_SYMBOL(kill_pgrp);
+
+int kill_pid(struct pid *pid, int sig, int priv)
+{
+ return kill_pid_info(sig, __si_special(priv), pid);
+}
+EXPORT_SYMBOL(kill_pid);
+
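
kill_pid() pairs naturally with a lookup held stable by RCU, mirroring the kill_proc_info() wrapper above; a hypothetical caller sending a kernel-internal signal (nr is an illustrative pid_t):

	int err;

	rcu_read_lock();
	err = kill_pid(find_pid(nr), SIGTERM, 1);	/* priv => SEND_SIG_PRIV */
	rcu_read_unlock();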
int
kill_pg(pid_t pgrp, int sig, int priv)
{
__sigqueue_free(q);
}
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
unsigned long flags;
int ret = 0;
- struct sighand_struct *sh;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
*/
rcu_read_lock();
- if (unlikely(p->flags & PF_EXITING)) {
+ if (!likely(lock_task_sighand(p, &flags))) {
ret = -1;
goto out_err;
}
-retry:
- sh = rcu_dereference(p->sighand);
-
- spin_lock_irqsave(&sh->siglock, flags);
- if (p->sighand != sh) {
- /* We raced with exec() in a multithreaded process... */
- spin_unlock_irqrestore(&sh->siglock, flags);
- goto retry;
- }
-
- /*
- * We do the check here again to handle the following scenario:
- *
- * CPU 0 CPU 1
- * send_sigqueue
- * check PF_EXITING
- * interrupt exit code running
- * __exit_signal
- * lock sighand->siglock
- * unlock sighand->siglock
- * lock sh->siglock
- * add(tsk->pending) flush_sigqueue(tsk->pending)
- *
- */
-
- if (unlikely(p->flags & PF_EXITING)) {
- ret = -1;
- goto out;
- }
-
if (unlikely(!list_empty(&q->list))) {
/*
* If an SI_TIMER entry is already queued, just increment
* the overrun count.
*/
- if (q->info.si_code != SI_TIMER)
- BUG();
+ BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
goto out;
}
signal_wake_up(p, sig == SIGKILL);
out:
- spin_unlock_irqrestore(&sh->siglock, flags);
+ unlock_task_sighand(p, &flags);
out_err:
rcu_read_unlock();
* the overrun count. Other uses should not try to
* send the signal multiple times.
*/
- if (q->info.si_code != SI_TIMER)
- BUG();
+ BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
goto out;
}
spin_unlock_irqrestore(&psig->siglock, flags);
}
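
send_sigqueue() and its group sibling exist for the POSIX-timer path, where the sigqueue entry is preallocated so expiry can never fail on allocation. The usage pattern is roughly this (field names as in struct k_itimer of this era):

	/* at timer_create(): reserve the entry up front */
	struct sigqueue *q = sigqueue_alloc();	/* marks SIGQUEUE_PREALLOC */
	if (!q)
		return -EAGAIN;

	/* at expiry: re-sending while still queued only bumps si_overrun,
	 * as the SI_TIMER check above enforces */
	send_sigqueue(timr->it_sigev_signo, q, timr->it_process);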
-static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
struct siginfo info;
unsigned long flags;
struct task_struct *parent;
struct sighand_struct *sighand;
- if (to_self)
+ if (tsk->ptrace & PT_PTRACED)
parent = tsk->parent;
else {
tsk = tsk->group_leader;
spin_unlock_irqrestore(&sighand->siglock, flags);
}
+static inline int may_ptrace_stop(void)
+{
+ if (!likely(current->ptrace & PT_PTRACED))
+ return 0;
+
+ if (unlikely(current->parent == current->real_parent &&
+ (current->ptrace & PT_ATTACHED)))
+ return 0;
+
+ if (unlikely(current->signal == current->parent->signal) &&
+ unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+ return 0;
+
+ /*
+ * Are we in the middle of do_coredump?
+ * If so, and our tracer is also part of the coredump, stopping
+ * is a deadlock situation, and pointless because our tracer
+ * is dead, so don't allow us to stop.
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_waiters != 0. Otherwise it
+ * is safe to enter schedule().
+ */
+ if (unlikely(current->mm->core_waiters) &&
+ unlikely(current->mm == current->parent->mm))
+ return 0;
+
+ return 1;
+}
+
/*
* This must be called with current->sighand->siglock held.
*
/* Let the debugger run. */
set_current_state(TASK_TRACED);
spin_unlock_irq(&current->sighand->siglock);
+ try_to_freeze();
read_lock(&tasklist_lock);
- if (likely(current->ptrace & PT_PTRACED) &&
- likely(current->parent != current->real_parent ||
- !(current->ptrace & PT_ATTACHED)) &&
- (likely(current->parent->signal != current->signal) ||
- !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
- do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
+ if (may_ptrace_stop()) {
+ do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
} else {
static void
finish_stop(int stop_count)
{
- int to_self;
-
/*
* If there are no other threads in the group, or if there is
* a group stop in progress and we are the last to stop,
* report to the parent. When ptraced, every thread reports itself.
*/
- if (stop_count < 0 || (current->ptrace & PT_PTRACED))
- to_self = 1;
- else if (stop_count == 0)
- to_self = 0;
- else
- goto out;
-
- read_lock(&tasklist_lock);
- do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
- read_unlock(&tasklist_lock);
+ if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+ read_lock(&tasklist_lock);
+ do_notify_parent_cldstop(current, CLD_STOPPED);
+ read_unlock(&tasklist_lock);
+ }
-out:
- schedule();
+ do {
+ schedule();
+ } while (try_to_freeze());
/*
* Now we don't run again until continued.
*/
* Returns nonzero if we've actually stopped and released the siglock.
* Returns zero if we didn't stop and still hold the siglock.
*/
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
{
struct signal_struct *sig = current->signal;
- struct sighand_struct *sighand = current->sighand;
- int stop_count = -1;
+ int stop_count;
if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
return 0;
* There is a group stop in progress. We don't need to
* start another one.
*/
- signr = sig->group_exit_code;
stop_count = --sig->group_stop_count;
- current->exit_code = signr;
- set_current_state(TASK_STOPPED);
- if (stop_count == 0)
- sig->flags = SIGNAL_STOP_STOPPED;
- spin_unlock_irq(&sighand->siglock);
- }
- else if (thread_group_empty(current)) {
- /*
- * Lock must be held through transition to stopped state.
- */
- current->exit_code = current->signal->group_exit_code = signr;
- set_current_state(TASK_STOPPED);
- sig->flags = SIGNAL_STOP_STOPPED;
- spin_unlock_irq(&sighand->siglock);
- }
- else {
+ } else {
/*
* There is no group stop already in progress.
- * We must initiate one now, but that requires
- * dropping siglock to get both the tasklist lock
- * and siglock again in the proper order. Note that
- * this allows an intervening SIGCONT to be posted.
- * We need to check for that and bail out if necessary.
+ * We must initiate one now.
*/
struct task_struct *t;
- spin_unlock_irq(&sighand->siglock);
-
- /* signals can be posted during this window */
-
- read_lock(&tasklist_lock);
- spin_lock_irq(&sighand->siglock);
+ sig->group_exit_code = signr;
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+ stop_count = 0;
+ for (t = next_thread(current); t != current; t = next_thread(t))
/*
- * Another stop or continue happened while we
- * didn't have the lock. We can just swallow this
- * signal now. If we raced with a SIGCONT, that
- * should have just cleared it now. If we raced
- * with another processor delivering a stop signal,
- * then the SIGCONT that wakes us up should clear it.
+ * Setting state to TASK_STOPPED for a group
+ * stop is always done with the siglock held,
+ * so this check has no races.
*/
- read_unlock(&tasklist_lock);
- return 0;
- }
-
- if (sig->group_stop_count == 0) {
- sig->group_exit_code = signr;
- stop_count = 0;
- for (t = next_thread(current); t != current;
- t = next_thread(t))
- /*
- * Setting state to TASK_STOPPED for a group
- * stop is always done with the siglock held,
- * so this check has no races.
- */
- if (!t->exit_state &&
- !(t->state & (TASK_STOPPED|TASK_TRACED))) {
- stop_count++;
- signal_wake_up(t, 0);
- }
- sig->group_stop_count = stop_count;
- }
- else {
- /* A race with another thread while unlocked. */
- signr = sig->group_exit_code;
- stop_count = --sig->group_stop_count;
- }
-
- current->exit_code = signr;
- set_current_state(TASK_STOPPED);
- if (stop_count == 0)
- sig->flags = SIGNAL_STOP_STOPPED;
-
- spin_unlock_irq(&sighand->siglock);
- read_unlock(&tasklist_lock);
+ if (!t->exit_state &&
+ !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+ stop_count++;
+ signal_wake_up(t, 0);
+ }
+ sig->group_stop_count = stop_count;
}
+ if (stop_count == 0)
+ sig->flags = SIGNAL_STOP_STOPPED;
+ current->exit_code = sig->group_exit_code;
+ __set_current_state(TASK_STOPPED);
+
+	spin_unlock_irq(&current->sighand->siglock);
finish_stop(stop_count);
return 1;
}
/* Let the debugger run. */
ptrace_stop(signr, signr, info);
- /* We're back. Did the debugger cancel the sig or group_exit? */
+ /* We're back. Did the debugger cancel the sig? */
signr = current->exit_code;
- if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
+ if (signr == 0)
continue;
current->exit_code = 0;
if (sig_kernel_ignore(signr)) /* Default is nothing. */
continue;
- /* Init gets no signals it doesn't want. */
- if (current == child_reaper)
+ /*
+ * Init of a pid space gets no signals it doesn't want from
+ * within that pid space. It can of course get signals from
+ * its parent pid space.
+ */
+ if (current == child_reaper(current))
continue;
if (sig_kernel_stop(signr)) {
return kill_proc_info(sig, &info, pid);
}
-int
-do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
struct k_sigaction *k;
sigset_t mask;
if (act) {
sigdelsetmask(&act->sa.sa_mask,
sigmask(SIGKILL) | sigmask(SIGSTOP));
+ *k = *act;
/*
* POSIX 3.3.1.3:
* "Setting a signal action to SIG_IGN for a signal that is
* be discarded, whether or not it is blocked"
*/
if (act->sa.sa_handler == SIG_IGN ||
- (act->sa.sa_handler == SIG_DFL &&
- sig_kernel_ignore(sig))) {
- /*
- * This is a fairly rare case, so we only take the
- * tasklist_lock once we're sure we'll need it.
- * Now we must do this little unlock and relock
- * dance to maintain the lock hierarchy.
- */
+ (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
struct task_struct *t = current;
- spin_unlock_irq(&t->sighand->siglock);
- read_lock(&tasklist_lock);
- spin_lock_irq(&t->sighand->siglock);
- *k = *act;
sigemptyset(&mask);
sigaddset(&mask, sig);
rm_from_queue_full(&mask, &t->signal->shared_pending);
recalc_sigpending_tsk(t);
t = next_thread(t);
} while (t != current);
-	spin_unlock_irq(&current->sighand->siglock);
- read_unlock(&tasklist_lock);
- return 0;
}
-
- *k = *act;
}
spin_unlock_irq(&current->sighand->siglock);
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
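
The weak attribute makes this a link-time default: an architecture that supplies a strong arch_vma_name() overrides it. A hypothetical override, with VSYSCALL_START standing in for an arch-provided constant:

	const char *arch_vma_name(struct vm_area_struct *vma)
	{
		if (vma->vm_start == VSYSCALL_START)
			return "[vsyscall]";
		return NULL;
	}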
void __init signals_init(void)
{
sigqueue_cachep =