hrtimers: fix inconsistent lock state on resume in hres_timers_resume
diff --git a/kernel/signal.c b/kernel/signal.c
index 9b6fda5..3152ac3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
  *             to allow signals to be sent reliably.
  */
 
-#include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
-#include <linux/posix-timers.h>
 #include <linux/signal.h>
-#include <linux/audit.h>
+#include <linux/signalfd.h>
+#include <linux/tracehook.h>
+#include <linux/capability.h>
+#include <linux/freezer.h>
+#include <linux/pid_namespace.h>
+#include <linux/nsproxy.h>
+#include <trace/sched.h>
+
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/siginfo.h>
+#include "audit.h"     /* audit_signal_info() */
 
 /*
  * SLAB caches for signal bits.
  */
 
-static kmem_cache_t *sigqueue_cachep;
-
-/*
- * In POSIX a signal is sent either to a specific thread (Linux task)
- * or to the process as a whole (Linux thread group).  How the signal
- * is sent determines whether it's to one thread or the whole group,
- * which determines which signal mask(s) are involved in blocking it
- * from being delivered until later.  When the signal is delivered,
- * either it's caught or ignored by a user handler or it has a default
- * effect that applies to the whole thread group (POSIX process).
- *
- * The possible effects an unblocked signal set to SIG_DFL can have are:
- *   ignore    - Nothing Happens
- *   terminate - kill the process, i.e. all threads in the group,
- *               similar to exit_group.  The group leader (only) reports
- *               WIFSIGNALED status to its parent.
- *   coredump  - write a core dump file describing all threads using
- *               the same mm and then kill all those threads
- *   stop      - stop all the threads in the group, i.e. TASK_STOPPED state
- *
- * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
- * Other signals when not blocked and set to SIG_DFL behaves as follows.
- * The job control signals also have other special effects.
- *
- *     +--------------------+------------------+
- *     |  POSIX signal      |  default action  |
- *     +--------------------+------------------+
- *     |  SIGHUP            |  terminate       |
- *     |  SIGINT            |  terminate       |
- *     |  SIGQUIT           |  coredump        |
- *     |  SIGILL            |  coredump        |
- *     |  SIGTRAP           |  coredump        |
- *     |  SIGABRT/SIGIOT    |  coredump        |
- *     |  SIGBUS            |  coredump        |
- *     |  SIGFPE            |  coredump        |
- *     |  SIGKILL           |  terminate(+)    |
- *     |  SIGUSR1           |  terminate       |
- *     |  SIGSEGV           |  coredump        |
- *     |  SIGUSR2           |  terminate       |
- *     |  SIGPIPE           |  terminate       |
- *     |  SIGALRM           |  terminate       |
- *     |  SIGTERM           |  terminate       |
- *     |  SIGCHLD           |  ignore          |
- *     |  SIGCONT           |  ignore(*)       |
- *     |  SIGSTOP           |  stop(*)(+)      |
- *     |  SIGTSTP           |  stop(*)         |
- *     |  SIGTTIN           |  stop(*)         |
- *     |  SIGTTOU           |  stop(*)         |
- *     |  SIGURG            |  ignore          |
- *     |  SIGXCPU           |  coredump        |
- *     |  SIGXFSZ           |  coredump        |
- *     |  SIGVTALRM         |  terminate       |
- *     |  SIGPROF           |  terminate       |
- *     |  SIGPOLL/SIGIO     |  terminate       |
- *     |  SIGSYS/SIGUNUSED  |  coredump        |
- *     |  SIGSTKFLT         |  terminate       |
- *     |  SIGWINCH          |  ignore          |
- *     |  SIGPWR            |  terminate       |
- *     |  SIGRTMIN-SIGRTMAX |  terminate       |
- *     +--------------------+------------------+
- *     |  non-POSIX signal  |  default action  |
- *     +--------------------+------------------+
- *     |  SIGEMT            |  coredump        |
- *     +--------------------+------------------+
- *
- * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
- * (*) Special job control effects:
- * When SIGCONT is sent, it resumes the process (all threads in the group)
- * from TASK_STOPPED state and also clears any pending/queued stop signals
- * (any of those marked with "stop(*)").  This happens regardless of blocking,
- * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
- * any pending/queued SIGCONT signals; this happens regardless of blocking,
- * catching, or ignored the stop signal, though (except for SIGSTOP) the
- * default action of stopping the process may happen later or never.
- */
-
-#ifdef SIGEMT
-#define M_SIGEMT       M(SIGEMT)
-#else
-#define M_SIGEMT       0
-#endif
-
-#if SIGRTMIN > BITS_PER_LONG
-#define M(sig) (1ULL << ((sig)-1))
-#else
-#define M(sig) (1UL << ((sig)-1))
-#endif
-#define T(sig, mask) (M(sig) & (mask))
-
-#define SIG_KERNEL_ONLY_MASK (\
-       M(SIGKILL)   |  M(SIGSTOP)                                   )
-
-#define SIG_KERNEL_STOP_MASK (\
-       M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
-
-#define SIG_KERNEL_COREDUMP_MASK (\
-        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
-        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
-        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
+static struct kmem_cache *sigqueue_cachep;
 
-#define SIG_KERNEL_IGNORE_MASK (\
-        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
+DEFINE_TRACE(sched_signal_send);
 
-#define sig_kernel_only(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
-#define sig_kernel_coredump(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
-#define sig_kernel_ignore(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
-#define sig_kernel_stop(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
-
-#define sig_user_defined(t, signr) \
-       (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
-        ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
+static void __user *sig_handler(struct task_struct *t, int sig)
+{
+       return t->sighand->action[sig - 1].sa.sa_handler;
+}
 
-#define sig_fatal(t, signr) \
-       (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
-        (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
+static int sig_handler_ignored(void __user *handler, int sig)
+{
+       /* Is it explicitly or implicitly ignored? */
+       return handler == SIG_IGN ||
+               (handler == SIG_DFL && sig_kernel_ignore(sig));
+}
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-       void __user * handler;
-
-       /*
-        * Tracers always want to know about signals..
-        */
-       if (t->ptrace & PT_PTRACED)
-               return 0;
+       void __user *handler;
 
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
-       if (sigismember(&t->blocked, sig))
+       if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
 
-       /* Is it explicitly or implicitly ignored? */
-       handler = t->sighand->action[sig-1].sa.sa_handler;
-       return   handler == SIG_IGN ||
-               (handler == SIG_DFL && sig_kernel_ignore(sig));
+       handler = sig_handler(t, sig);
+       if (!sig_handler_ignored(handler, sig))
+               return 0;
+
+       /*
+        * Tracers may want to know about even ignored signals.
+        */
+       return !tracehook_consider_ignored_signal(t, sig, handler);
 }
 
 /*
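
The "blocked signals are never ignored" rule above is visible from userspace. A minimal demonstration (my own sketch, not part of this patch): a blocked SIGCHLD stays queued even though its disposition would discard it, and is only dropped once unblocked.

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, pending;

        signal(SIGCHLD, SIG_DFL);       /* default action: ignore */
        sigemptyset(&set);
        sigaddset(&set, SIGCHLD);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGCHLD);                 /* blocked => queued, not ignored */
        sigpending(&pending);
        printf("pending while blocked: %d\n",
               sigismember(&pending, SIGCHLD));        /* 1 */

        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* now quietly discarded */
        sigpending(&pending);
        printf("pending after unblock: %d\n",
               sigismember(&pending, SIGCHLD));        /* 0 */
        return 0;
}
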
@@ -210,26 +109,44 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-fastcall void recalc_sigpending_tsk(struct task_struct *t)
+static int recalc_sigpending_tsk(struct task_struct *t)
 {
        if (t->signal->group_stop_count > 0 ||
-           (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
-           PENDING(&t->signal->shared_pending, &t->blocked))
+           PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
-       else
-               clear_tsk_thread_flag(t, TIF_SIGPENDING);
+               return 1;
+       }
+       /*
+        * We must never clear the flag in another thread, or in current
+        * when it's possible the current syscall is returning -ERESTART*.
+        * So we don't clear it here; we leave that to callers that know they should.
+        */
+       return 0;
+}
+
+/*
+ * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
+ * This is superfluous when called on current, the wakeup is a harmless no-op.
+ */
+void recalc_sigpending_and_wake(struct task_struct *t)
+{
+       if (recalc_sigpending_tsk(t))
+               signal_wake_up(t, 0);
 }
 
 void recalc_sigpending(void)
 {
-       recalc_sigpending_tsk(current);
+       if (unlikely(tracehook_force_sigpending()))
+               set_thread_flag(TIF_SIGPENDING);
+       else if (!recalc_sigpending_tsk(current) && !freezing(current))
+               clear_thread_flag(TIF_SIGPENDING);
+
 }
 
 /* Given the mask, find the first available signal that should be serviced. */
 
-static int
-next_signal(struct sigpending *pending, sigset_t *mask)
+int next_signal(struct sigpending *pending, sigset_t *mask)
 {
        unsigned long i, *s, *m, x;
        int sig = 0;
@@ -262,27 +179,42 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
+/*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ *   appropriate lock must be held to stop the target task from exiting
+ */
 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
 {
        struct sigqueue *q = NULL;
+       struct user_struct *user;
 
-       atomic_inc(&t->user->sigpending);
+       /*
+        * We won't get problems with the target's UID changing under us
+        * because changing it requires RCU be used, and if t != current, the
+        * caller must be holding the RCU readlock (by way of a spinlock) and
+        * we use RCU protection here
+        */
+       user = get_uid(__task_cred(t)->user);
+       atomic_inc(&user->sigpending);
        if (override_rlimit ||
-           atomic_read(&t->user->sigpending) <=
+           atomic_read(&user->sigpending) <=
                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        if (unlikely(q == NULL)) {
-               atomic_dec(&t->user->sigpending);
+               atomic_dec(&user->sigpending);
+               free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
-               q->user = get_uid(t->user);
+               q->user = user;
        }
-       return(q);
+
+       return q;
 }
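
A sketch of the calling convention the comment above demands (illustrative only; sketch_queue_sig() is my name, and it omits the signalfd and wakeup bookkeeping that the real send_signal() later in this patch performs). Holding the target's siglock pins the task, which in turn keeps the __task_cred() use inside the allocator safe:

static void sketch_queue_sig(struct task_struct *t, int sig)
{
        unsigned long flags;
        struct sigqueue *q;

        spin_lock_irqsave(&t->sighand->siglock, flags); /* pins t */
        q = __sigqueue_alloc(t, GFP_ATOMIC, 0);
        if (q) {
                q->info.si_signo = sig;
                q->info.si_errno = 0;
                q->info.si_code = SI_KERNEL;
                list_add_tail(&q->list, &t->pending.list);
                sigaddset(&t->pending.signal, sig);
        }
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
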
 
-static inline void __sigqueue_free(struct sigqueue *q)
+static void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
@@ -291,7 +223,7 @@ static inline void __sigqueue_free(struct sigqueue *q)
        kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
        struct sigqueue *q;
 
@@ -306,122 +238,59 @@ static void flush_sigqueue(struct sigpending *queue)
 /*
  * Flush all pending signals for a task.
  */
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&t->sighand->siglock, flags);
-       clear_tsk_thread_flag(t,TIF_SIGPENDING);
+       clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
+static void __flush_itimer_signals(struct sigpending *pending)
 {
-       struct sighand_struct * sighand = tsk->sighand;
+       sigset_t signal, retain;
+       struct sigqueue *q, *n;
 
-       /* Ok, we're done with the signal handlers */
-       tsk->sighand = NULL;
-       if (atomic_dec_and_test(&sighand->count))
-               sighand_free(sighand);
-}
+       signal = pending->signal;
+       sigemptyset(&retain);
 
-void exit_sighand(struct task_struct *tsk)
-{
-       write_lock_irq(&tasklist_lock);
-       rcu_read_lock();
-       if (tsk->sighand != NULL) {
-               struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
-               spin_lock(&sighand->siglock);
-               __exit_sighand(tsk);
-               spin_unlock(&sighand->siglock);
+       list_for_each_entry_safe(q, n, &pending->list, list) {
+               int sig = q->info.si_signo;
+
+               if (likely(q->info.si_code != SI_TIMER)) {
+                       sigaddset(&retain, sig);
+               } else {
+                       sigdelset(&signal, sig);
+                       list_del_init(&q->list);
+                       __sigqueue_free(q);
+               }
        }
-       rcu_read_unlock();
-       write_unlock_irq(&tasklist_lock);
+
+       sigorsets(&pending->signal, &signal, &retain);
 }
 
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
+void flush_itimer_signals(void)
 {
-       struct signal_struct * sig = tsk->signal;
-       struct sighand_struct * sighand;
+       struct task_struct *tsk = current;
+       unsigned long flags;
 
-       if (!sig)
-               BUG();
-       if (!atomic_read(&sig->count))
-               BUG();
-       rcu_read_lock();
-       sighand = rcu_dereference(tsk->sighand);
-       spin_lock(&sighand->siglock);
-       posix_cpu_timers_exit(tsk);
-       if (atomic_dec_and_test(&sig->count)) {
-               posix_cpu_timers_exit_group(tsk);
-               if (tsk == sig->curr_target)
-                       sig->curr_target = next_thread(tsk);
-               tsk->signal = NULL;
-               __exit_sighand(tsk);
-               spin_unlock(&sighand->siglock);
-               flush_sigqueue(&sig->shared_pending);
-       } else {
-               /*
-                * If there is any task waiting for the group exit
-                * then notify it:
-                */
-               if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-                       wake_up_process(sig->group_exit_task);
-                       sig->group_exit_task = NULL;
-               }
-               if (tsk == sig->curr_target)
-                       sig->curr_target = next_thread(tsk);
-               tsk->signal = NULL;
-               /*
-                * Accumulate here the counters for all threads but the
-                * group leader as they die, so they can be added into
-                * the process-wide totals when those are taken.
-                * The group leader stays around as a zombie as long
-                * as there are other threads.  When it gets reaped,
-                * the exit.c code will add its counts into these totals.
-                * We won't ever get here for the group leader, since it
-                * will have been the last reference on the signal_struct.
-                */
-               sig->utime = cputime_add(sig->utime, tsk->utime);
-               sig->stime = cputime_add(sig->stime, tsk->stime);
-               sig->min_flt += tsk->min_flt;
-               sig->maj_flt += tsk->maj_flt;
-               sig->nvcsw += tsk->nvcsw;
-               sig->nivcsw += tsk->nivcsw;
-               sig->sched_time += tsk->sched_time;
-               __exit_sighand(tsk);
-               spin_unlock(&sighand->siglock);
-               sig = NULL;     /* Marker for below.  */
-       }
-       rcu_read_unlock();
-       clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-       flush_sigqueue(&tsk->pending);
-       if (sig) {
-               /*
-                * We are cleaning up the signal_struct here.
-                */
-               exit_thread_group_keys(sig);
-               kmem_cache_free(signal_cachep, sig);
-       }
+       spin_lock_irqsave(&tsk->sighand->siglock, flags);
+       __flush_itimer_signals(&tsk->pending);
+       __flush_itimer_signals(&tsk->signal->shared_pending);
+       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 }
 
-void exit_signal(struct task_struct *tsk)
+void ignore_signals(struct task_struct *t)
 {
-       atomic_dec(&tsk->signal->live);
+       int i;
 
-       write_lock_irq(&tasklist_lock);
-       __exit_signal(tsk);
-       write_unlock_irq(&tasklist_lock);
+       for (i = 0; i < _NSIG; ++i)
+               t->sighand->action[i].sa.sa_handler = SIG_IGN;
+
+       flush_signals(t);
 }
 
 /*
@@ -442,6 +311,16 @@ flush_signal_handlers(struct task_struct *t, int force_default)
        }
 }
 
+int unhandled_signal(struct task_struct *tsk, int sig)
+{
+       void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
+       if (is_global_init(tsk))
+               return 1;
+       if (handler != SIG_IGN && handler != SIG_DFL)
+               return 0;
+       return !tracehook_consider_fatal_signal(tsk, sig, handler);
+}
+
 
 /* Notify the system that a driver wants to block all signals for this
  * process, and wants to be notified if any signals at all were to be
@@ -477,13 +356,9 @@ unblock_all_signals(void)
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
        struct sigqueue *q, *first = NULL;
-       int still_pending = 0;
-
-       if (unlikely(!sigismember(&list->signal, sig)))
-               return 0;
 
        /*
         * Collect the siginfo appropriate to this signal.  Check if
@@ -491,41 +366,37 @@ static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
        */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
-                       if (first) {
-                               still_pending = 1;
-                               break;
-                       }
+                       if (first)
+                               goto still_pending;
                        first = q;
                }
        }
+
+       sigdelset(&list->signal, sig);
+
        if (first) {
+still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
-               if (!still_pending)
-                       sigdelset(&list->signal, sig);
        } else {
-
                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info.
                 */
-               sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
        }
-       return 1;
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
 {
-       int sig = 0;
+       int sig = next_signal(pending, mask);
 
-       sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
@@ -536,11 +407,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        }
                }
 
-               if (!collect_signal(sig, pending, info))
-                       sig = 0;
-                               
+               collect_signal(sig, pending, info);
        }
-       recalc_sigpending();
 
        return sig;
 }
@@ -553,29 +421,60 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
-       int signr = __dequeue_signal(&tsk->pending, mask, info);
-       if (!signr)
+       int signr;
+
+       /* We only dequeue private signals from ourselves; we don't let
+        * signalfd steal them
+        */
+       signr = __dequeue_signal(&tsk->pending, mask, info);
+       if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
-       if (signr && unlikely(sig_kernel_stop(signr))) {
-               /*
-                * Set a marker that we have dequeued a stop signal.  Our
-                * caller might release the siglock and then the pending
-                * stop signal it is about to process is no longer in the
-                * pending bitmasks, but must still be cleared by a SIGCONT
-                * (and overruled by a SIGKILL).  So those cases clear this
-                * shared flag after we've set it.  Note that this flag may
-                * remain set after the signal we return is ignored or
-                * handled.  That doesn't matter because its only purpose
-                * is to alert stop-signal processing code when another
-                * processor has come along and cleared the flag.
-                */
-               if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
-                       tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
-       }
-       if ( signr &&
-            ((info->si_code & __SI_MASK) == __SI_TIMER) &&
-            info->si_sys_private){
+               /*
+                * itimer signal ?
+                *
+                * itimers are process shared and we restart periodic
+                * itimers in the signal delivery path to prevent DoS
+                * attacks in the high resolution timer case. This is
+                * compliant with the old way of self restarting
+                * itimers, as the SIGALRM is a legacy signal and only
+                * queued once. Changing the restart behaviour to
+                * restart the timer in the signal dequeue path is
+                * reducing the timer noise on heavily loaded !highres
+                * systems too.
+                */
+               if (unlikely(signr == SIGALRM)) {
+                       struct hrtimer *tmr = &tsk->signal->real_timer;
+
+                       if (!hrtimer_is_queued(tmr) &&
+                           tsk->signal->it_real_incr.tv64 != 0) {
+                               hrtimer_forward(tmr, tmr->base->get_time(),
+                                               tsk->signal->it_real_incr);
+                               hrtimer_restart(tmr);
+                       }
+               }
+       }
+
+       recalc_sigpending();
+       if (!signr)
+               return 0;
+
+       if (unlikely(sig_kernel_stop(signr))) {
+               /*
+                * Set a marker that we have dequeued a stop signal.  Our
+                * caller might release the siglock and then the pending
+                * stop signal it is about to process is no longer in the
+                * pending bitmasks, but must still be cleared by a SIGCONT
+                * (and overruled by a SIGKILL).  So those cases clear this
+                * shared flag after we've set it.  Note that this flag may
+                * remain set after the signal we return is ignored or
+                * handled.  That doesn't matter because its only purpose
+                * is to alert stop-signal processing code when another
+                * processor has come along and cleared the flag.
+                */
+               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+       }
+       if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
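
The userspace-visible effect of re-arming the timer in the dequeue path is simply a steadily ticking periodic itimer; a small demonstration (my own example, not from the patch):

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
        (void)sig;
        ticks++;
}

int main(void)
{
        struct itimerval it = {
                .it_interval = { .tv_usec = 100000 },   /* re-arm every 100ms */
                .it_value    = { .tv_usec = 100000 },
        };

        signal(SIGALRM, on_alarm);
        setitimer(ITIMER_REAL, &it, NULL);

        while (ticks < 5)
                pause();        /* each delivery re-arms the timer for us */

        printf("received %d SIGALRMs\n", (int)ticks);
        return 0;
}
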
@@ -607,15 +506,15 @@ void signal_wake_up(struct task_struct *t, int resume)
        set_tsk_thread_flag(t, TIF_SIGPENDING);
 
        /*
-        * For SIGKILL, we want to wake it up in the stopped/traced case.
-        * We don't check t->state here because there is a race with it
+        * For SIGKILL, we want to wake it up in the stopped/traced/killable
+        * case. We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
-               mask |= TASK_STOPPED | TASK_TRACED;
+               mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
 }
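
TASK_WAKEKILL is the bit shared by TASK_STOPPED, TASK_TRACED and TASK_KILLABLE, so this single mask resumes all of them for fatal signals. A hedged sketch of the sleeping side, assuming a hypothetical my_dev structure:

#include <linux/sched.h>
#include <linux/wait.h>

struct my_dev {                         /* hypothetical, for illustration */
        wait_queue_head_t waitq;
        bool ready;
};

static int wait_for_device(struct my_dev *dev)
{
        /*
         * Sleeps in TASK_KILLABLE: only a fatal signal ends the wait
         * early, by way of the TASK_WAKEKILL bit in the mask above.
         */
        if (wait_event_killable(dev->waitq, dev->ready))
                return -ERESTARTSYS;
        return 0;
}
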
@@ -625,6 +524,33 @@ void signal_wake_up(struct task_struct *t, int resume)
  * Returns 1 if any signals were found.
  *
  * All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+       struct sigqueue *q, *n;
+       sigset_t m;
+
+       sigandsets(&m, mask, &s->signal);
+       if (sigisemptyset(&m))
+               return 0;
+
+       signandsets(&s->signal, &s->signal, mask);
+       list_for_each_entry_safe(q, n, &s->list, list) {
+               if (sigismember(mask, q->info.si_signo)) {
+                       list_del_init(&q->list);
+                       __sigqueue_free(q);
+               }
+       }
+       return 1;
+}
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
  */
 static int rm_from_queue(unsigned long mask, struct sigpending *s)
 {
@@ -646,90 +572,87 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 
 /*
  * Bad permissions for sending the signal
+ * - the caller must hold at least the RCU read lock
  */
 static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
-       int error = -EINVAL;
+       const struct cred *cred = current_cred(), *tcred;
+       struct pid *sid;
+       int error;
+
        if (!valid_signal(sig))
-               return error;
-       error = -EPERM;
-       if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
-           && ((sig != SIGCONT) ||
-               (current->signal->session != t->signal->session))
-           && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-           && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-           && !capable(CAP_KILL))
+               return -EINVAL;
+
+       if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+               return 0;
+
+       error = audit_signal_info(sig, t); /* Let audit system see the signal */
+       if (error)
                return error;
 
-       error = security_task_kill(t, info, sig);
-       if (!error)
-               audit_signal_info(sig, t); /* Let audit system see the signal */
-       return error;
-}
+       tcred = __task_cred(t);
+       if ((cred->euid ^ tcred->suid) &&
+           (cred->euid ^ tcred->uid) &&
+           (cred->uid  ^ tcred->suid) &&
+           (cred->uid  ^ tcred->uid) &&
+           !capable(CAP_KILL)) {
+               switch (sig) {
+               case SIGCONT:
+                       sid = task_session(t);
+                       /*
+                        * We don't return the error if sid == NULL. The
+                        * task was unhashed; the caller must notice this.
+                        */
+                       if (!sid || sid == task_session(current))
+                               break;
+               default:
+                       return -EPERM;
+               }
+       }
 
-/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-                                    int to_self,
-                                    int why);
+       return security_task_kill(t, info, sig, 0);
+}
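
Seen from userspace (my illustration, not part of the patch), these credential tests are what an unprivileged kill() trips over; since sig == 0 still takes this path, kill(pid, 0) probes existence and permission without sending anything:

#include <errno.h>
#include <signal.h>
#include <stdio.h>

int main(void)
{
        /* PID 1 belongs to root; run this unprivileged and the
         * euid/uid comparisons above reject it. */
        if (kill(1, 0) < 0 && errno == EPERM)
                printf("kill(1, 0): EPERM as expected\n");
        return 0;
}
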
 
 /*
- * Handle magic process-wide effects of stop/continue signals.
- * Unlike the signal actions, these happen immediately at signal-generation
+ * Handle magic process-wide effects of stop/continue signals. Unlike
+ * the signal actions, these happen immediately at signal-generation
  * time regardless of blocking, ignoring, or handling.  This does the
  * actual continuing for SIGCONT, but not the actual stopping for stop
- * signals.  The process stop is done as a signal action for SIG_DFL.
+ * signals. The process stop is done as a signal action for SIG_DFL.
+ *
+ * Returns true if the signal should be actually delivered, otherwise
+ * it should be dropped.
  */
-static void handle_stop_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p)
 {
+       struct signal_struct *signal = p->signal;
        struct task_struct *t;
 
-       if (p->signal->flags & SIGNAL_GROUP_EXIT)
+       if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
-                * The process is in the middle of dying already.
+                * The process is in the middle of dying, nothing to do.
                 */
-               return;
-
-       if (sig_kernel_stop(sig)) {
+       } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
-               rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
+               rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
-                       t = next_thread(t);
-               } while (t != p);
+               } while_each_thread(p, t);
        } else if (sig == SIGCONT) {
+               unsigned int why;
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
-               if (unlikely(p->signal->group_stop_count > 0)) {
-                       /*
-                        * There was a group stop in progress.  We'll
-                        * pretend it finished before we got here.  We are
-                        * obliged to report it to the parent: if the
-                        * SIGSTOP happened "after" this SIGCONT, then it
-                        * would have cleared this pending SIGCONT.  If it
-                        * happened "before" this SIGCONT, then the parent
-                        * got the SIGCHLD about the stop finishing before
-                        * the continue happened.  We do the notification
-                        * now, and it's as if the stop had finished and
-                        * the SIGCHLD was pending on entry to this kill.
-                        */
-                       p->signal->group_stop_count = 0;
-                       p->signal->flags = SIGNAL_STOP_CONTINUED;
-                       spin_unlock(&p->sighand->siglock);
-                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
-                       spin_lock(&p->sighand->siglock);
-               }
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
+               rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-                       
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
@@ -739,166 +662,53 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
-                        * the pending signal. 
+                        * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
-                       state = TASK_STOPPED;
+                       state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);
+               } while_each_thread(p, t);
 
-                       t = next_thread(t);
-               } while (t != p);
+               /*
+                * Notify the parent with CLD_CONTINUED if we were stopped.
+                *
+                * If we were in the middle of a group stop, we pretend it
+                * was already finished, and then continued. Since SIGCHLD
+                * doesn't queue, we report only CLD_STOPPED, as if the next
+                * CLD_CONTINUED was dropped.
+                */
+               why = 0;
+               if (signal->flags & SIGNAL_STOP_STOPPED)
+                       why |= SIGNAL_CLD_CONTINUED;
+               else if (signal->group_stop_count)
+                       why |= SIGNAL_CLD_STOPPED;
 
-               if (p->signal->flags & SIGNAL_STOP_STOPPED) {
+               if (why) {
                        /*
-                        * We were in fact stopped, and are now continued.
-                        * Notify the parent with CLD_CONTINUED.
+                        * The first thread which returns from finish_stop()
+                        * will take ->siglock, notice SIGNAL_CLD_MASK, and
+                        * notify its parent. See get_signal_to_deliver().
                         */
-                       p->signal->flags = SIGNAL_STOP_CONTINUED;
-                       p->signal->group_exit_code = 0;
-                       spin_unlock(&p->sighand->siglock);
-                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
-                       spin_lock(&p->sighand->siglock);
+                       signal->flags = why | SIGNAL_STOP_CONTINUED;
+                       signal->group_stop_count = 0;
+                       signal->group_exit_code = 0;
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
-                       p->signal->flags = 0;
+                       signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
-       } else if (sig == SIGKILL) {
-               /*
-                * Make sure that any pending stop signal already dequeued
-                * is undone by the wakeup for SIGKILL.
-                */
-               p->signal->flags = 0;
        }
-}
-
-static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-                       struct sigpending *signals)
-{
-       struct sigqueue * q = NULL;
-       int ret = 0;
 
-       /*
-        * fast-pathed signals for kernel-internal things like SIGSTOP
-        * or SIGKILL.
-        */
-       if (info == SEND_SIG_FORCED)
-               goto out_set;
-
-       /* Real-time signals must be queued if sent by sigqueue, or
-          some other real-time mechanism.  It is implementation
-          defined whether kill() does so.  We attempt to do so, on
-          the principle of least surprise, but since kill is not
-          allowed to fail with EAGAIN when low on memory we just
-          make sure at least one signal gets delivered and don't
-          pass on the info struct.  */
-
-       q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-                                            (is_si_special(info) ||
-                                             info->si_code >= 0)));
-       if (q) {
-               list_add_tail(&q->list, &signals->list);
-               switch ((unsigned long) info) {
-               case (unsigned long) SEND_SIG_NOINFO:
-                       q->info.si_signo = sig;
-                       q->info.si_errno = 0;
-                       q->info.si_code = SI_USER;
-                       q->info.si_pid = current->pid;
-                       q->info.si_uid = current->uid;
-                       break;
-               case (unsigned long) SEND_SIG_PRIV:
-                       q->info.si_signo = sig;
-                       q->info.si_errno = 0;
-                       q->info.si_code = SI_KERNEL;
-                       q->info.si_pid = 0;
-                       q->info.si_uid = 0;
-                       break;
-               default:
-                       copy_siginfo(&q->info, info);
-                       break;
-               }
-       } else if (!is_si_special(info)) {
-               if (sig >= SIGRTMIN && info->si_code != SI_USER)
-               /*
-                * Queue overflow, abort.  We may abort if the signal was rt
-                * and sent by user using something other than kill().
-                */
-                       return -EAGAIN;
-       }
-
-out_set:
-       sigaddset(&signals->signal, sig);
-       return ret;
-}
-
-#define LEGACY_QUEUE(sigptr, sig) \
-       (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
-
-
-static int
-specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-{
-       int ret = 0;
-
-       if (!irqs_disabled())
-               BUG();
-       assert_spin_locked(&t->sighand->siglock);
-
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(t, sig))
-               goto out;
-
-       /* Support queueing exactly one non-rt signal, so that we
-          can get more detailed information about the cause of
-          the signal. */
-       if (LEGACY_QUEUE(&t->pending, sig))
-               goto out;
-
-       ret = send_signal(sig, info, t, &t->pending);
-       if (!ret && !sigismember(&t->blocked, sig))
-               signal_wake_up(t, sig == SIGKILL);
-out:
-       return ret;
-}
-
-/*
- * Force a signal that the process can't ignore: if necessary
- * we unblock the signal and change any SIG_IGN to SIG_DFL.
- */
-
-int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-{
-       unsigned long int flags;
-       int ret;
-
-       spin_lock_irqsave(&t->sighand->siglock, flags);
-       if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
-               t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-       }
-       if (sigismember(&t->blocked, sig)) {
-               sigdelset(&t->blocked, sig);
-       }
-       recalc_sigpending_tsk(t);
-       ret = specific_send_sig_info(sig, info, t);
-       spin_unlock_irqrestore(&t->sighand->siglock, flags);
-
-       return ret;
-}
-
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-       force_sig_info(sig, SEND_SIG_FORCED, t);
+       return !sig_ignored(p, sig);
 }
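
The SIGCONT side of this is easy to observe from userspace (my own demo, not part of the patch): merely generating SIGCONT erases queued stop signals, even when everything involved is blocked and the default action would ignore the SIGCONT itself.

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, pending;

        sigemptyset(&set);
        sigaddset(&set, SIGTSTP);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGTSTP);                 /* blocked, so it sits in the queue */
        sigpending(&pending);
        printf("SIGTSTP pending before SIGCONT: %d\n",
               sigismember(&pending, SIGTSTP));        /* 1 */

        raise(SIGCONT);                 /* prepare_signal() removes the stop */
        sigpending(&pending);
        printf("SIGTSTP pending after SIGCONT:  %d\n",
               sigismember(&pending, SIGTSTP));        /* 0 */
        return 0;
}
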
 
 /*
@@ -917,14 +727,14 @@ static inline int wants_signal(int sig, struct task_struct *p)
                return 0;
        if (sig == SIGKILL)
                return 1;
-       if (p->state & (TASK_STOPPED | TASK_TRACED))
+       if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
 }
 
-static void
-__group_complete_signal(int sig, struct task_struct *p)
+static void complete_signal(int sig, struct task_struct *p, int group)
 {
+       struct signal_struct *signal = p->signal;
        struct task_struct *t;
 
        /*
@@ -935,7 +745,7 @@ __group_complete_signal(int sig, struct task_struct *p)
         */
        if (wants_signal(sig, p))
                t = p;
-       else if (thread_group_empty(p))
+       else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
@@ -945,15 +755,10 @@ __group_complete_signal(int sig, struct task_struct *p)
                /*
                 * Otherwise try to find a suitable thread.
                 */
-               t = p->signal->curr_target;
-               if (t == NULL)
-                       /* restart balancing at this thread */
-                       t = p->signal->curr_target = p;
-               BUG_ON(t->tgid != p->tgid);
-
+               t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
-                       if (t == p->signal->curr_target)
+                       if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
@@ -961,16 +766,18 @@ __group_complete_signal(int sig, struct task_struct *p)
                                 */
                                return;
                }
-               p->signal->curr_target = t;
+               signal->curr_target = t;
        }
 
        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
-       if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
+       if (sig_fatal(p, sig) &&
+           !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+           (sig == SIGKILL ||
+            !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
@@ -981,40 +788,16 @@ __group_complete_signal(int sig, struct task_struct *p)
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
-                       p->signal->flags = SIGNAL_GROUP_EXIT;
-                       p->signal->group_exit_code = sig;
-                       p->signal->group_stop_count = 0;
+                       signal->flags = SIGNAL_GROUP_EXIT;
+                       signal->group_exit_code = sig;
+                       signal->group_stop_count = 0;
                        t = p;
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
-                               t = next_thread(t);
-                       } while (t != p);
+                       } while_each_thread(p, t);
                        return;
                }
-
-               /*
-                * There will be a core dump.  We make all threads other
-                * than the chosen one go into a group stop so that nothing
-                * happens until it gets scheduled, takes the signal off
-                * the shared queue, and does the core dump.  This is a
-                * little more complicated than strictly necessary, but it
-                * keeps the signal state that winds up in the core dump
-                * unchanged from the death state, e.g. which thread had
-                * the core-dump signal unblocked.
-                */
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
-               p->signal->group_stop_count = 0;
-               p->signal->group_exit_task = t;
-               t = p;
-               do {
-                       p->signal->group_stop_count++;
-                       signal_wake_up(t, 0);
-                       t = next_thread(t);
-               } while (t != p);
-               wake_up_process(p->signal->group_exit_task);
-               return;
        }
 
        /*
@@ -1025,35 +808,174 @@ __group_complete_signal(int sig, struct task_struct *p)
        return;
 }
 
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+static inline int legacy_queue(struct sigpending *signals, int sig)
 {
-       int ret = 0;
+       return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
+}
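
The legacy_queue() test above is why classic signals coalesce while the real-time range really queues; a userspace illustration (mine, not from the patch):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t classic, rt;

static void on_usr1(int sig) { (void)sig; classic++; }
static void on_rt(int sig)   { (void)sig; rt++; }

int main(void)
{
        sigset_t set;
        int i;

        signal(SIGUSR1, on_usr1);
        signal(SIGRTMIN, on_rt);

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        for (i = 0; i < 3; i++) {
                raise(SIGUSR1);         /* 2nd and 3rd hit legacy_queue() */
                raise(SIGRTMIN);        /* each one is queued */
        }

        sigprocmask(SIG_UNBLOCK, &set, NULL);
        printf("SIGUSR1 delivered %d times, SIGRTMIN %d times\n",
               (int)classic, (int)rt);  /* typically 1 and 3 */
        return 0;
}
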
 
-       assert_spin_locked(&p->sighand->siglock);
-       handle_stop_signal(sig, p);
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+                       int group)
+{
+       struct sigpending *pending;
+       struct sigqueue *q;
 
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(p, sig))
-               return ret;
+       trace_sched_signal_send(sig, t);
 
-       if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
-               /* This is a non-RT signal and we already have one queued.  */
-               return ret;
+       assert_spin_locked(&t->sighand->siglock);
+       if (!prepare_signal(sig, t))
+               return 0;
 
+       pending = group ? &t->signal->shared_pending : &t->pending;
        /*
-        * Put this signal on the shared-pending queue, or fail with EAGAIN.
-        * We always use the shared queue for process-wide signals,
-        * to avoid several races.
+        * Short-circuit ignored signals and support queuing
+        * exactly one non-rt signal, so that we can get more
+        * detailed information about the cause of the signal.
         */
-       ret = send_signal(sig, info, p, &p->signal->shared_pending);
-       if (unlikely(ret))
-               return ret;
+       if (legacy_queue(pending, sig))
+               return 0;
+       /*
+        * fast-pathed signals for kernel-internal things like SIGSTOP
+        * or SIGKILL.
+        */
+       if (info == SEND_SIG_FORCED)
+               goto out_set;
 
-       __group_complete_signal(sig, p);
+       /* Real-time signals must be queued if sent by sigqueue, or
+          some other real-time mechanism.  It is implementation
+          defined whether kill() does so.  We attempt to do so, on
+          the principle of least surprise, but since kill is not
+          allowed to fail with EAGAIN when low on memory we just
+          make sure at least one signal gets delivered and don't
+          pass on the info struct.  */
+
+       q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+                                            (is_si_special(info) ||
+                                             info->si_code >= 0)));
+       if (q) {
+               list_add_tail(&q->list, &pending->list);
+               switch ((unsigned long) info) {
+               case (unsigned long) SEND_SIG_NOINFO:
+                       q->info.si_signo = sig;
+                       q->info.si_errno = 0;
+                       q->info.si_code = SI_USER;
+                       q->info.si_pid = task_tgid_nr_ns(current,
+                                                       task_active_pid_ns(t));
+                       q->info.si_uid = current_uid();
+                       break;
+               case (unsigned long) SEND_SIG_PRIV:
+                       q->info.si_signo = sig;
+                       q->info.si_errno = 0;
+                       q->info.si_code = SI_KERNEL;
+                       q->info.si_pid = 0;
+                       q->info.si_uid = 0;
+                       break;
+               default:
+                       copy_siginfo(&q->info, info);
+                       break;
+               }
+       } else if (!is_si_special(info)) {
+               if (sig >= SIGRTMIN && info->si_code != SI_USER)
+               /*
+                * Queue overflow, abort.  We may abort if the signal was rt
+                * and sent by user using something other than kill().
+                */
+                       return -EAGAIN;
+       }
+
+out_set:
+       signalfd_notify(t, sig);
+       sigaddset(&pending->signal, sig);
+       complete_signal(sig, t, group);
        return 0;
 }
 
+int print_fatal_signals;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+       printk("%s/%d: potentially unexpected fatal signal %d.\n",
+               current->comm, task_pid_nr(current), signr);
+
+#if defined(__i386__) && !defined(__arch_um__)
+       printk("code at %08lx: ", regs->ip);
+       {
+               int i;
+               for (i = 0; i < 16; i++) {
+                       unsigned char insn;
+
+                       __get_user(insn, (unsigned char *)(regs->ip + i));
+                       printk("%02x ", insn);
+               }
+       }
+#endif
+       printk("\n");
+       show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+       get_option (&str, &print_fatal_signals);
+
+       return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
+
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+       return send_signal(sig, info, p, 1);
+}
+
+static int
+specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+       return send_signal(sig, info, t, 0);
+}
+
+/*
+ * Force a signal that the process can't ignore: if necessary
+ * we unblock the signal and change any SIG_IGN to SIG_DFL.
+ *
+ * Note: If we unblock the signal, we always reset it to SIG_DFL,
+ * since we do not want to have a signal handler that was blocked
+ * be invoked when user space had explicitly blocked it.
+ *
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
+ */
+int
+force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+       unsigned long int flags;
+       int ret, blocked, ignored;
+       struct k_sigaction *action;
+
+       spin_lock_irqsave(&t->sighand->siglock, flags);
+       action = &t->sighand->action[sig-1];
+       ignored = action->sa.sa_handler == SIG_IGN;
+       blocked = sigismember(&t->blocked, sig);
+       if (blocked || ignored) {
+               action->sa.sa_handler = SIG_DFL;
+               if (blocked) {
+                       sigdelset(&t->blocked, sig);
+                       recalc_sigpending_and_wake(t);
+               }
+       }
+       if (action->sa.sa_handler == SIG_DFL)
+               t->signal->flags &= ~SIGNAL_UNKILLABLE;
+       ret = specific_send_sig_info(sig, info, t);
+       spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
+       return ret;
+}
+
+void
+force_sig_specific(int sig, struct task_struct *t)
+{
+       force_sig_info(sig, SEND_SIG_FORCED, t);
+}
+
 /*
  * Nuke all other threads in the group.
  */
@@ -1061,12 +983,8 @@ void zap_other_threads(struct task_struct *p)
 {
        struct task_struct *t;
 
-       p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;
 
-       if (thread_group_empty(p))
-               return;
-
        for (t = next_thread(p); t != p; t = next_thread(t)) {
                /*
                 * Don't bother with already dead threads
@@ -1074,132 +992,142 @@ void zap_other_threads(struct task_struct *p)
                if (t->exit_state)
                        continue;
 
-               /*
-                * We don't want to notify the parent, since we are
-                * killed as part of a thread group due to another
-                * thread doing an execve() or similar. So set the
-                * exit signal to -1 to allow immediate reaping of
-                * the process.  But don't detach the thread group
-                * leader.
-                */
-               if (t != p->group_leader)
-                       t->exit_signal = -1;
-
                /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
 }
 
+int __fatal_signal_pending(struct task_struct *tsk)
+{
+       return sigismember(&tsk->pending.signal, SIGKILL);
+}
+EXPORT_SYMBOL(__fatal_signal_pending);
+
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+       struct sighand_struct *sighand;
+
+       rcu_read_lock();
+       for (;;) {
+               sighand = rcu_dereference(tsk->sighand);
+               if (unlikely(sighand == NULL))
+                       break;
+
+               spin_lock_irqsave(&sighand->siglock, *flags);
+               if (likely(sighand == tsk->sighand))
+                       break;
+               spin_unlock_irqrestore(&sighand->siglock, *flags);
+       }
+       rcu_read_unlock();
+
+       return sighand;
+}
+
 /*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ * send signal info to all the members of a group
+ * - the caller must hold the RCU read lock at least
  */
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
        unsigned long flags;
-       struct sighand_struct *sp;
        int ret;
 
-retry:
        ret = check_kill_permission(sig, info, p);
-       if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
-               spin_lock_irqsave(&sp->siglock, flags);
-               if (p->sighand != sp) {
-                       spin_unlock_irqrestore(&sp->siglock, flags);
-                       goto retry;
-               }
-               if ((atomic_read(&sp->count) == 0) ||
-                               (atomic_read(&p->usage) == 0)) {
-                       spin_unlock_irqrestore(&sp->siglock, flags);
-                       return -ESRCH;
+
+       if (!ret && sig) {
+               ret = -ESRCH;
+               if (lock_task_sighand(p, &flags)) {
+                       ret = __group_send_sig_info(sig, info, p);
+                       unlock_task_sighand(p, &flags);
                }
-               ret = __group_send_sig_info(sig, info, p);
-               spin_unlock_irqrestore(&sp->siglock, flags);
        }
 
        return ret;
 }
 
 /*
- * kill_pg_info() sends a signal to a process group: this is what the tty
+ * __kill_pgrp_info() sends a signal to a process group: this is what the tty
  * control characters do (^C, ^Z etc)
+ * - the caller must hold at least a readlock on tasklist_lock
  */
-
-int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
 {
        struct task_struct *p = NULL;
        int retval, success;
 
-       if (pgrp <= 0)
-               return -EINVAL;
-
        success = 0;
        retval = -ESRCH;
-       do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
+       do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
-       } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
+       } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
 }
 
-int
-kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 {
-       int retval;
+       int error = -ESRCH;
+       struct task_struct *p;
 
-       read_lock(&tasklist_lock);
-       retval = __kill_pg_info(sig, info, pgrp);
-       read_unlock(&tasklist_lock);
+       rcu_read_lock();
+retry:
+       p = pid_task(pid, PIDTYPE_PID);
+       if (p) {
+               error = group_send_sig_info(sig, info, p);
+               if (unlikely(error == -ESRCH))
+                       /*
+                        * The task was unhashed in between, try again.
+                        * If it is dead, pid_task() will return NULL,
+                        * if we race with de_thread() it will find the
+                        * new leader.
+                        */
+                       goto retry;
+       }
+       rcu_read_unlock();
 
-       return retval;
+       return error;
 }
 
 int
 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
        int error;
-       int acquired_tasklist_lock = 0;
-       struct task_struct *p;
-
        rcu_read_lock();
-       if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
-               read_lock(&tasklist_lock);
-               acquired_tasklist_lock = 1;
-       }
-       p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p)
-               error = group_send_sig_info(sig, info, p);
-       if (unlikely(acquired_tasklist_lock))
-               read_unlock(&tasklist_lock);
+       error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
 }
 
-/* like kill_proc_info(), but doesn't use uid/euid of "current" */
-int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
-                     uid_t uid, uid_t euid)
+/* like kill_pid_info(), but doesn't use uid/euid of "current" */
+int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
+                     uid_t uid, uid_t euid, u32 secid)
 {
        int ret = -EINVAL;
        struct task_struct *p;
+       const struct cred *pcred;
 
        if (!valid_signal(sig))
                return ret;
 
        read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
+       p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
-       if ((!info || ((unsigned long)info != 1 &&
-                       (unsigned long)info != 2 && SI_FROMUSER(info)))
-           && (euid != p->suid) && (euid != p->uid)
-           && (uid != p->suid) && (uid != p->uid)) {
+       pcred = __task_cred(p);
+       if ((info == SEND_SIG_NOINFO ||
+            (!is_si_special(info) && SI_FROMUSER(info))) &&
+           euid != pcred->suid && euid != pcred->uid &&
+           uid  != pcred->suid && uid  != pcred->uid) {
                ret = -EPERM;
                goto out_unlock;
        }
+       ret = security_task_kill(p, info, sig, secid);
+       if (ret)
+               goto out_unlock;
        if (sig && p->sighand) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
@@ -1210,7 +1138,7 @@ out_unlock:
        read_unlock(&tasklist_lock);
        return ret;
 }
-EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
+EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1219,30 +1147,39 @@ EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
  * is probably wrong.  Should make it like BSD or SYSV.
  */
 
-static int kill_something_info(int sig, struct siginfo *info, int pid)
+static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 {
-       if (!pid) {
-               return kill_pg_info(sig, info, process_group(current));
-       } else if (pid == -1) {
+       int ret;
+
+       if (pid > 0) {
+               rcu_read_lock();
+               ret = kill_pid_info(sig, info, find_vpid(pid));
+               rcu_read_unlock();
+               return ret;
+       }
+
+       read_lock(&tasklist_lock);
+       if (pid != -1) {
+               ret = __kill_pgrp_info(sig, info,
+                               pid ? find_vpid(-pid) : task_pgrp(current));
+       } else {
                int retval = 0, count = 0;
                struct task_struct * p;
 
-               read_lock(&tasklist_lock);
                for_each_process(p) {
-                       if (p->pid > 1 && p->tgid != current->tgid) {
+                       if (task_pid_vnr(p) > 1 &&
+                                       !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
-               read_unlock(&tasklist_lock);
-               return count ? retval : -ESRCH;
-       } else if (pid < 0) {
-               return kill_pg_info(sig, info, -pid);
-       } else {
-               return kill_proc_info(sig, info, pid);
+               ret = count ? retval : -ESRCH;
        }
+       read_unlock(&tasklist_lock);
+
+       return ret;
 }
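As seen from userspace, the dispatch above implements the familiar kill(2)
encodings; an illustrative snippet (function name and values are arbitrary):

    #include <signal.h>
    #include <sys/types.h>

    /* Illustrative only: the four pid encodings that
     * kill_something_info() dispatches on. */
    static void kill_cases(pid_t pid, pid_t pgrp)
    {
            kill(pid, SIGTERM);    /* pid > 0:   one thread group */
            kill(0, SIGTERM);      /* pid == 0:  caller's own process group */
            kill(-pgrp, SIGTERM);  /* pid < -1:  process group pgrp */
            kill(-1, SIGTERM);     /* pid == -1: every process we may signal,
                                    * except init and our own group */
    }
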
 
 /*
@@ -1250,8 +1187,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
  */
 
 /*
- * These two are the most common entry points.  They send a signal
- * just to the specific thread.
+ * The caller must ensure the task can't exit.
  */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
@@ -1266,17 +1202,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        if (!valid_signal(sig))
                return -EINVAL;
 
-       /*
-        * We need the tasklist lock even for the specific
-        * thread case (when we don't need to follow the group
-        * lists) in order to avoid races with "p->sighand"
-        * going away or changing from under us.
-        */
-       read_lock(&tasklist_lock);  
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       read_unlock(&tasklist_lock);
        return ret;
 }
 
@@ -1289,20 +1217,6 @@ send_sig(int sig, struct task_struct *p, int priv)
        return send_sig_info(sig, __si_special(priv), p);
 }
 
-/*
- * This is the entry point for "process-wide" signals.
- * They will go to an appropriate thread in the thread group.
- */
-int
-send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-       int ret;
-       read_lock(&tasklist_lock);
-       ret = group_send_sig_info(sig, info, p);
-       read_unlock(&tasklist_lock);
-       return ret;
-}
-
 void
 force_sig(int sig, struct task_struct *p)
 {
@@ -1328,17 +1242,23 @@ force_sigsegv(int sig, struct task_struct *p)
        return 0;
 }
 
-int
-kill_pg(pid_t pgrp, int sig, int priv)
+int kill_pgrp(struct pid *pid, int sig, int priv)
 {
-       return kill_pg_info(sig, __si_special(priv), pgrp);
+       int ret;
+
+       read_lock(&tasklist_lock);
+       ret = __kill_pgrp_info(sig, __si_special(priv), pid);
+       read_unlock(&tasklist_lock);
+
+       return ret;
 }
+EXPORT_SYMBOL(kill_pgrp);
 
-int
-kill_proc(pid_t pid, int sig, int priv)
+int kill_pid(struct pid *pid, int sig, int priv)
 {
-       return kill_proc_info(sig, __si_special(priv), pid);
+       return kill_pid_info(sig, __si_special(priv), pid);
 }
+EXPORT_SYMBOL(kill_pid);
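These two exports are the struct-pid replacements for kill_pg()/kill_proc();
a hedged sketch of typical driver-side use, modeled on (but not copied from)
the tty hangup path:

    /* Sketch: a driver holding a struct pid reference on a foreground
     * process group hangs it up, kernel-internal origin (priv = 1). */
    static void hangup_foreground(struct pid *fg_pgrp)
    {
            if (!fg_pgrp)
                    return;
            kill_pgrp(fg_pgrp, SIGHUP, 1);
            kill_pgrp(fg_pgrp, SIGCONT, 1);
    }
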
 
 /*
  * These functions support sending signals using preallocated sigqueue
@@ -1359,153 +1279,68 @@ struct sigqueue *sigqueue_alloc(void)
        return(q);
 }
 
-void sigqueue_free(struct sigqueue *q)
-{
-       unsigned long flags;
-       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-       /*
-        * If the signal is still pending remove it from the
-        * pending queue.
-        */
-       if (unlikely(!list_empty(&q->list))) {
-               spinlock_t *lock = &current->sighand->siglock;
-               read_lock(&tasklist_lock);
-               spin_lock_irqsave(lock, flags);
-               if (!list_empty(&q->list))
-                       list_del_init(&q->list);
-               spin_unlock_irqrestore(lock, flags);
-               read_unlock(&tasklist_lock);
-       }
-       q->flags &= ~SIGQUEUE_PREALLOC;
-       __sigqueue_free(q);
-}
-
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+void sigqueue_free(struct sigqueue *q)
 {
        unsigned long flags;
-       int ret = 0;
-       struct sighand_struct *sh;
+       spinlock_t *lock = &current->sighand->siglock;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
        /*
-        * The rcu based delayed sighand destroy makes it possible to
-        * run this without tasklist lock held. The task struct itself
-        * cannot go away as create_timer did get_task_struct().
-        *
-        * We return -1, when the task is marked exiting, so
-        * posix_timer_event can redirect it to the group leader
+        * We must hold ->siglock while testing q->list
+        * to serialize with collect_signal() or with
+        * __exit_signal()->flush_sigqueue().
         */
-       rcu_read_lock();
-
-       if (unlikely(p->flags & PF_EXITING)) {
-               ret = -1;
-               goto out_err;
-       }
-
-retry:
-       sh = rcu_dereference(p->sighand);
-
-       spin_lock_irqsave(&sh->siglock, flags);
-       if (p->sighand != sh) {
-               /* We raced with exec() in a multithreaded process... */
-               spin_unlock_irqrestore(&sh->siglock, flags);
-               goto retry;
-       }
-
+       spin_lock_irqsave(lock, flags);
+       q->flags &= ~SIGQUEUE_PREALLOC;
        /*
-        * We do the check here again to handle the following scenario:
-        *
-        * CPU 0                CPU 1
-        * send_sigqueue
-        * check PF_EXITING
-        * interrupt            exit code running
-        *                      __exit_signal
-        *                      lock sighand->siglock
-        *                      unlock sighand->siglock
-        * lock sh->siglock
-        * add(tsk->pending)    flush_sigqueue(tsk->pending)
-        *
+        * If it is queued it will be freed when dequeued,
+        * like the "regular" sigqueue.
         */
+       if (!list_empty(&q->list))
+               q = NULL;
+       spin_unlock_irqrestore(lock, flags);
 
-       if (unlikely(p->flags & PF_EXITING)) {
-               ret = -1;
-               goto out;
-       }
-
-       if (unlikely(!list_empty(&q->list))) {
-               /*
-                * If an SI_TIMER entry is already queue just increment
-                * the overrun count.
-                */
-               if (q->info.si_code != SI_TIMER)
-                       BUG();
-               q->info.si_overrun++;
-               goto out;
-       }
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(p, sig)) {
-               ret = 1;
-               goto out;
-       }
-
-       list_add_tail(&q->list, &p->pending.list);
-       sigaddset(&p->pending.signal, sig);
-       if (!sigismember(&p->blocked, sig))
-               signal_wake_up(p, sig == SIGKILL);
-
-out:
-       spin_unlock_irqrestore(&sh->siglock, flags);
-out_err:
-       rcu_read_unlock();
-
-       return ret;
+       if (q)
+               __sigqueue_free(q);
 }
 
-int
-send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 {
+       int sig = q->info.si_signo;
+       struct sigpending *pending;
        unsigned long flags;
-       int ret = 0;
+       int ret;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
-       read_lock(&tasklist_lock);
-       /* Since it_lock is held, p->sighand cannot be NULL. */
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-       handle_stop_signal(sig, p);
+       ret = -1;
+       if (!likely(lock_task_sighand(t, &flags)))
+               goto ret;
 
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(p, sig)) {
-               ret = 1;
+       ret = 1; /* the signal is ignored */
+       if (!prepare_signal(sig, t))
                goto out;
-       }
 
+       ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
-                * the overrun count.  Other uses should not try to
-                * send the signal multiple times.
+                * the overrun count.
                 */
-               if (q->info.si_code != SI_TIMER)
-                       BUG();
+               BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
-       } 
-
-       /*
-        * Put this signal on the shared-pending queue.
-        * We always use the shared queue for process-wide signals,
-        * to avoid several races.
-        */
-       list_add_tail(&q->list, &p->signal->shared_pending.list);
-       sigaddset(&p->signal->shared_pending.signal, sig);
+       }
+       q->info.si_overrun = 0;
 
-       __group_complete_signal(sig, p);
+       signalfd_notify(t, sig);
+       pending = group ? &t->signal->shared_pending : &t->pending;
+       list_add_tail(&q->list, &pending->list);
+       sigaddset(&pending->signal, sig);
+       complete_signal(sig, t, group);
 out:
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       read_unlock(&tasklist_lock);
+       unlock_task_sighand(t, &flags);
+ret:
        return ret;
 }
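For orientation, the preallocated-sigqueue lifecycle that these two functions
now serialize purely on ->siglock; posix timers are the real user, and this
condensed caller is illustrative only:

    /* Sketch of the sigqueue_alloc()/send_sigqueue()/sigqueue_free()
     * lifecycle (condensed from what the posix-timer code does). */
    static void timer_signal_lifecycle(struct task_struct *target)
    {
            struct sigqueue *q = sigqueue_alloc();  /* timer_create() time */

            if (!q)
                    return;
            q->info.si_signo = SIGALRM;
            q->info.si_code = SI_TIMER;

            /* On each expiry: queues the signal, or just bumps
             * q->info.si_overrun if the entry is still queued. */
            send_sigqueue(q, target, 1 /* group-wide */);

            /* timer_delete() time: if still queued, the entry is
             * freed later, on dequeue. */
            sigqueue_free(q);
    }
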
 
@@ -1521,32 +1356,48 @@ static inline void __wake_up_parent(struct task_struct *p,
 /*
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
+ *
+ * Returns -1 if our parent ignored us and so we've switched to
+ * self-reaping, or else @sig.
  */
-
-void do_notify_parent(struct task_struct *tsk, int sig)
+int do_notify_parent(struct task_struct *tsk, int sig)
 {
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
+       struct task_cputime cputime;
+       int ret = sig;
 
        BUG_ON(sig == -1);
 
        /* do_notify_parent_cldstop should have been called instead.  */
-       BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+       BUG_ON(task_is_stopped_or_traced(tsk));
 
        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
 
        info.si_signo = sig;
        info.si_errno = 0;
-       info.si_pid = tsk->pid;
-       info.si_uid = tsk->uid;
+       /*
+        * we are under tasklist_lock here so our parent is tied to
+        * us and cannot exit and release its namespace.
+        *
+        * the only thing it can do is switch its nsproxy with sys_unshare,
+        * but unsharing pid namespaces is not allowed, so we'll always
+        * see the relevant namespace.
+        *
+        * write_lock() currently calls preempt_disable() which is the
+        * same as rcu_read_lock(), but according to Oleg it is not
+        * correct to rely on this.
+        */
+       rcu_read_lock();
+       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+       info.si_uid = __task_cred(tsk)->uid;
+       rcu_read_unlock();
 
-       /* FIXME: find out whether or not this is supposed to be c*time. */
-       info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
-                                                      tsk->signal->utime));
-       info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
-                                                      tsk->signal->stime));
+       thread_group_cputime(tsk, &cputime);
+       info.si_utime = cputime_to_jiffies(cputime.utime);
+       info.si_stime = cputime_to_jiffies(cputime.stime);
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
@@ -1578,24 +1429,26 @@ void do_notify_parent(struct task_struct *tsk, int sig)
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
-               tsk->exit_signal = -1;
+               ret = tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-                       sig = 0;
+                       sig = -1;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
+
+       return ret;
 }
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
 
-       if (to_self)
+       if (tsk->ptrace & PT_PTRACED)
                parent = tsk->parent;
        else {
                tsk = tsk->group_leader;
@@ -1604,12 +1457,16 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int w
 
        info.si_signo = SIGCHLD;
        info.si_errno = 0;
-       info.si_pid = tsk->pid;
-       info.si_uid = tsk->uid;
+       /*
+        * see the comment in do_notify_parent() about the following 3 lines
+        */
+       rcu_read_lock();
+       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+       info.si_uid = __task_cred(tsk)->uid;
+       rcu_read_unlock();
 
-       /* FIXME: find out whether or not this is supposed to be c*time. */
-       info.si_utime = cputime_to_jiffies(tsk->utime);
-       info.si_stime = cputime_to_jiffies(tsk->stime);
+       info.si_utime = cputime_to_clock_t(tsk->utime);
+       info.si_stime = cputime_to_clock_t(tsk->stime);
 
        info.si_code = why;
        switch (why) {
@@ -1638,6 +1495,36 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int w
        spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
+static inline int may_ptrace_stop(void)
+{
+       if (!likely(current->ptrace & PT_PTRACED))
+               return 0;
+       /*
+        * Are we in the middle of do_coredump?
+        * If so, and our tracer is also part of the coredump, stopping
+        * is a deadlock situation and pointless because our tracer
+        * is dead, so don't allow us to stop.
+        * If SIGKILL was already sent before the caller unlocked
+        * ->siglock we must see ->core_state != NULL. Otherwise it
+        * is safe to enter schedule().
+        */
+       if (unlikely(current->mm->core_state) &&
+           unlikely(current->mm == current->parent->mm))
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+       return  sigismember(&tsk->pending.signal, SIGKILL) ||
+               sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1646,11 +1533,30 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int w
  * That makes it a way to test a stopped process for
  * being ptrace-stopped vs being job-control-stopped.
  *
- * If we actually decide not to stop at all because the tracer is gone,
- * we leave nostop_code in current->exit_code.
+ * If we actually decide not to stop at all because the tracer
+ * is gone, we keep current->exit_code unless clear_code.
  */
-static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
+       if (arch_ptrace_stop_needed(exit_code, info)) {
+               /*
+                * The arch code has something special to do before a
+                * ptrace stop.  This is allowed to block, e.g. for faults
+                * on user stack pages.  We can't keep the siglock while
+                * calling arch_ptrace_stop, so we must release it now.
+                * To preserve proper semantics, we must do this before
+                * any signal bookkeeping like checking group_stop_count.
+                * Meanwhile, a SIGKILL could come in before we retake the
+                * siglock.  That must prevent us from sleeping in TASK_TRACED.
+                * So after regaining the lock, we must check for SIGKILL.
+                */
+               spin_unlock_irq(&current->sighand->siglock);
+               arch_ptrace_stop(exit_code, info);
+               spin_lock_irq(&current->sighand->siglock);
+               if (sigkill_pending(current))
+                       return;
+       }
+
        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
@@ -1662,28 +1568,32 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        current->exit_code = exit_code;
 
        /* Let the debugger run.  */
-       set_current_state(TASK_TRACED);
+       __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
-       if (likely(current->ptrace & PT_PTRACED) &&
-           likely(current->parent != current->real_parent ||
-                  !(current->ptrace & PT_ATTACHED)) &&
-           (likely(current->parent->signal != current->signal) ||
-            !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-               do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
+       if (may_ptrace_stop()) {
+               do_notify_parent_cldstop(current, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
-                * Don't stop here.
+                * Don't drop the lock yet, another tracer may come.
                 */
+               __set_current_state(TASK_RUNNING);
+               if (clear_code)
+                       current->exit_code = 0;
                read_unlock(&tasklist_lock);
-               set_current_state(TASK_RUNNING);
-               current->exit_code = nostop_code;
        }
 
        /*
+        * While in TASK_TRACED, we were considered "frozen enough".
+        * Now that we woke up, it's crucial that, if we're supposed
+        * to be frozen, we freeze now before running anything substantial.
+        */
+       try_to_freeze();
+
+       /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
@@ -1694,8 +1604,9 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
+        * This sets TIF_SIGPENDING, but never clears it.
         */
-       recalc_sigpending();
+       recalc_sigpending_tsk(current);
 }
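The two arch_ hooks consulted at the top of ptrace_stop() default to no-ops;
for reference, the generic fallbacks as defined in include/linux/ptrace.h of
this era (introduced for ia64's register-backing-store flush):

    #ifndef arch_ptrace_stop_needed
    #define arch_ptrace_stop_needed(code, info)     (0)
    #endif

    #ifndef arch_ptrace_stop
    #define arch_ptrace_stop(code, info)            do { } while (0)
    #endif
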
 
 void ptrace_notify(int exit_code)
@@ -1707,38 +1618,32 @@ void ptrace_notify(int exit_code)
        memset(&info, 0, sizeof info);
        info.si_signo = SIGTRAP;
        info.si_code = exit_code;
-       info.si_pid = current->pid;
-       info.si_uid = current->uid;
+       info.si_pid = task_pid_vnr(current);
+       info.si_uid = current_uid();
 
        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, 0, &info);
+       ptrace_stop(exit_code, 1, &info);
        spin_unlock_irq(&current->sighand->siglock);
 }
 
 static void
 finish_stop(int stop_count)
 {
-       int to_self;
-
        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
-       if (stop_count < 0 || (current->ptrace & PT_PTRACED))
-               to_self = 1;
-       else if (stop_count == 0)
-               to_self = 0;
-       else
-               goto out;
-
-       read_lock(&tasklist_lock);
-       do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
-       read_unlock(&tasklist_lock);
+       if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(current, CLD_STOPPED);
+               read_unlock(&tasklist_lock);
+       }
 
-out:
-       schedule();
+       do {
+               schedule();
+       } while (try_to_freeze());
        /*
         * Now we don't run again until continued.
         */
@@ -1751,199 +1656,164 @@ out:
  * Returns nonzero if we've actually stopped and released the siglock.
  * Returns zero if we didn't stop and still hold the siglock.
  */
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
 {
        struct signal_struct *sig = current->signal;
-       struct sighand_struct *sighand = current->sighand;
-       int stop_count = -1;
-
-       if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
-               return 0;
+       int stop_count;
 
        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
                 * start another one.
                 */
-               signr = sig->group_exit_code;
                stop_count = --sig->group_stop_count;
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               if (stop_count == 0)
-                       sig->flags = SIGNAL_STOP_STOPPED;
-               spin_unlock_irq(&sighand->siglock);
-       }
-       else if (thread_group_empty(current)) {
-               /*
-                * Lock must be held through transition to stopped state.
-                */
-               current->exit_code = current->signal->group_exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               sig->flags = SIGNAL_STOP_STOPPED;
-               spin_unlock_irq(&sighand->siglock);
-       }
-       else {
+       } else {
+               struct task_struct *t;
+
+               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+                   unlikely(signal_group_exit(sig)))
+                       return 0;
                /*
                 * There is no group stop already in progress.
-                * We must initiate one now, but that requires
-                * dropping siglock to get both the tasklist lock
-                * and siglock again in the proper order.  Note that
-                * this allows an intervening SIGCONT to be posted.
-                * We need to check for that and bail out if necessary.
+                * We must initiate one now.
                 */
-               struct task_struct *t;
-
-               spin_unlock_irq(&sighand->siglock);
-
-               /* signals can be posted during this window */
-
-               read_lock(&tasklist_lock);
-               spin_lock_irq(&sighand->siglock);
+               sig->group_exit_code = signr;
 
-               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+               stop_count = 0;
+               for (t = next_thread(current); t != current; t = next_thread(t))
                        /*
-                        * Another stop or continue happened while we
-                        * didn't have the lock.  We can just swallow this
-                        * signal now.  If we raced with a SIGCONT, that
-                        * should have just cleared it now.  If we raced
-                        * with another processor delivering a stop signal,
-                        * then the SIGCONT that wakes us up should clear it.
+                        * Setting state to TASK_STOPPED for a group
+                        * stop is always done with the siglock held,
+                        * so this check has no races.
                         */
-                       read_unlock(&tasklist_lock);
-                       return 0;
-               }
-
-               if (sig->group_stop_count == 0) {
-                       sig->group_exit_code = signr;
-                       stop_count = 0;
-                       for (t = next_thread(current); t != current;
-                            t = next_thread(t))
-                               /*
-                                * Setting state to TASK_STOPPED for a group
-                                * stop is always done with the siglock held,
-                                * so this check has no races.
-                                */
-                               if (!t->exit_state &&
-                                   !(t->state & (TASK_STOPPED|TASK_TRACED))) {
-                                       stop_count++;
-                                       signal_wake_up(t, 0);
-                               }
-                       sig->group_stop_count = stop_count;
-               }
-               else {
-                       /* A race with another thread while unlocked.  */
-                       signr = sig->group_exit_code;
-                       stop_count = --sig->group_stop_count;
-               }
-
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               if (stop_count == 0)
-                       sig->flags = SIGNAL_STOP_STOPPED;
-
-               spin_unlock_irq(&sighand->siglock);
-               read_unlock(&tasklist_lock);
+                       if (!(t->flags & PF_EXITING) &&
+                           !task_is_stopped_or_traced(t)) {
+                               stop_count++;
+                               signal_wake_up(t, 0);
+                       }
+               sig->group_stop_count = stop_count;
        }
 
+       if (stop_count == 0)
+               sig->flags = SIGNAL_STOP_STOPPED;
+       current->exit_code = sig->group_exit_code;
+       __set_current_state(TASK_STOPPED);
+
+       spin_unlock_irq(&current->sighand->siglock);
        finish_stop(stop_count);
        return 1;
 }
 
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static inline int handle_group_stop(void)
+static int ptrace_signal(int signr, siginfo_t *info,
+                        struct pt_regs *regs, void *cookie)
 {
-       int stop_count;
+       if (!(current->ptrace & PT_PTRACED))
+               return signr;
 
-       if (current->signal->group_exit_task == current) {
-               /*
-                * Group stop is so we can do a core dump,
-                * We are the initiating thread, so get on with it.
-                */
-               current->signal->group_exit_task = NULL;
-               return 0;
+       ptrace_signal_deliver(regs, cookie);
+
+       /* Let the debugger run.  */
+       ptrace_stop(signr, 0, info);
+
+       /* We're back.  Did the debugger cancel the sig?  */
+       signr = current->exit_code;
+       if (signr == 0)
+               return signr;
+
+       current->exit_code = 0;
+
+       /*
+        * Update the siginfo structure if the signal has changed.
+        * If the debugger wanted something specific in the siginfo
+        * structure then it should have updated *info via
+        * PTRACE_SETSIGINFO.
+        */
+       if (signr != info->si_signo) {
+               info->si_signo = signr;
+               info->si_errno = 0;
+               info->si_code = SI_USER;
+               info->si_pid = task_pid_vnr(current->parent);
+               info->si_uid = task_uid(current->parent);
        }
 
-       if (current->signal->flags & SIGNAL_GROUP_EXIT)
-               /*
-                * Group stop is so another thread can do a core dump,
-                * or else we are racing against a death signal.
-                * Just punt the stop so we can get the next signal.
-                */
-               return 0;
+       /* If the (new) signal is now blocked, requeue it.  */
+       if (sigismember(&current->blocked, signr)) {
+               specific_send_sig_info(signr, info, current);
+               signr = 0;
+       }
 
-       /*
-        * There is a group stop in progress.  We stop
-        * without any associated signal being in our queue.
-        */
-       stop_count = --current->signal->group_stop_count;
-       if (stop_count == 0)
-               current->signal->flags = SIGNAL_STOP_STOPPED;
-       current->exit_code = current->signal->group_exit_code;
-       set_current_state(TASK_STOPPED);
-       spin_unlock_irq(&current->sighand->siglock);
-       finish_stop(stop_count);
-       return 1;
+       return signr;
 }
 
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
 {
-       sigset_t *mask = &current->blocked;
-       int signr = 0;
+       struct sighand_struct *sighand = current->sighand;
+       struct signal_struct *signal = current->signal;
+       int signr;
 
 relock:
-       spin_lock_irq(&current->sighand->siglock);
-       for (;;) {
-               struct k_sigaction *ka;
+       /*
+        * We'll jump back here after any time we were stopped in TASK_STOPPED.
+        * While in TASK_STOPPED, we were considered "frozen enough".
+        * Now that we woke up, it's crucial that, if we're supposed
+        * to be frozen, we freeze now before running anything substantial.
+        */
+       try_to_freeze();
 
-               if (unlikely(current->signal->group_stop_count > 0) &&
-                   handle_group_stop())
-                       goto relock;
+       spin_lock_irq(&sighand->siglock);
+       /*
+        * Every stopped thread goes here after wakeup. Check to see if
+        * we should notify the parent; prepare_signal(SIGCONT) encodes
+        * the CLD_ si_code into SIGNAL_CLD_MASK bits.
+        */
+       if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
+               int why = (signal->flags & SIGNAL_STOP_CONTINUED)
+                               ? CLD_CONTINUED : CLD_STOPPED;
+               signal->flags &= ~SIGNAL_CLD_MASK;
+               spin_unlock_irq(&sighand->siglock);
 
-               signr = dequeue_signal(current, mask, info);
+               if (unlikely(!tracehook_notify_jctl(1, why)))
+                       goto relock;
 
-               if (!signr)
-                       break; /* will return 0 */
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(current->group_leader, why);
+               read_unlock(&tasklist_lock);
+               goto relock;
+       }
 
-               if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
-                       ptrace_signal_deliver(regs, cookie);
+       for (;;) {
+               struct k_sigaction *ka;
 
-                       /* Let the debugger run.  */
-                       ptrace_stop(signr, signr, info);
+               if (unlikely(signal->group_stop_count > 0) &&
+                   do_signal_stop(0))
+                       goto relock;
 
-                       /* We're back.  Did the debugger cancel the sig or group_exit? */
-                       signr = current->exit_code;
-                       if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
-                               continue;
+               /*
+                * Tracing can induce an artificial signal and choose the sigaction.
+                * The return value in @signr determines the default action,
+                * but @info->si_signo is the signal number we will report.
+                */
+               signr = tracehook_get_signal(current, regs, info, return_ka);
+               if (unlikely(signr < 0))
+                       goto relock;
+               if (unlikely(signr != 0))
+                       ka = return_ka;
+               else {
+                       signr = dequeue_signal(current, &current->blocked,
+                                              info);
 
-                       current->exit_code = 0;
+                       if (!signr)
+                               break; /* will return 0 */
 
-                       /* Update the siginfo structure if the signal has
-                          changed.  If the debugger wanted something
-                          specific in the siginfo structure then it should
-                          have updated *info via PTRACE_SETSIGINFO.  */
-                       if (signr != info->si_signo) {
-                               info->si_signo = signr;
-                               info->si_errno = 0;
-                               info->si_code = SI_USER;
-                               info->si_pid = current->parent->pid;
-                               info->si_uid = current->parent->uid;
+                       if (signr != SIGKILL) {
+                               signr = ptrace_signal(signr, info,
+                                                     regs, cookie);
+                               if (!signr)
+                                       continue;
                        }
 
-                       /* If the (new) signal is now blocked, requeue it.  */
-                       if (sigismember(&current->blocked, signr)) {
-                               specific_send_sig_info(signr, info, current);
-                               continue;
-                       }
+                       ka = &sighand->action[signr-1];
                }
 
-               ka = &current->sighand->action[signr-1];
                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
@@ -1962,8 +1832,11 @@ relock:
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
                        continue;
 
-               /* Init gets no signals it doesn't want.  */
-               if (current->pid == 1)
+               /*
+                * Global init gets no signals it doesn't want.
+                */
+               if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
+                   !signal_group_exit(signal))
                        continue;
 
                if (sig_kernel_stop(signr)) {
@@ -1978,17 +1851,17 @@ relock:
                         * We need to check for that and bail out if necessary.
                         */
                        if (signr != SIGSTOP) {
-                               spin_unlock_irq(&current->sighand->siglock);
+                               spin_unlock_irq(&sighand->siglock);
 
                                /* signals can be posted during this window */
 
-                               if (is_orphaned_pgrp(process_group(current)))
+                               if (is_current_pgrp_orphaned())
                                        goto relock;
 
-                               spin_lock_irq(&current->sighand->siglock);
+                               spin_lock_irq(&sighand->siglock);
                        }
 
-                       if (likely(do_signal_stop(signr))) {
+                       if (likely(do_signal_stop(info->si_signo))) {
                                /* It released the siglock.  */
                                goto relock;
                        }
@@ -2000,13 +1873,16 @@ relock:
                        continue;
                }
 
-               spin_unlock_irq(&current->sighand->siglock);
+               spin_unlock_irq(&sighand->siglock);
 
                /*
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;
+
                if (sig_kernel_coredump(signr)) {
+                       if (print_fatal_signals)
+                               print_fatal_signal(regs, info->si_signo);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
@@ -2015,26 +1891,65 @@ relock:
                         * first and our do_group_exit call below will use
                         * that value and ignore the one we pass it.
                         */
-                       do_coredump((long)signr, signr, regs);
+                       do_coredump(info->si_signo, info->si_signo, regs);
                }
 
                /*
                 * Death signals, no core dump.
                 */
-               do_group_exit(signr);
+               do_group_exit(info->si_signo);
                /* NOTREACHED */
        }
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irq(&sighand->siglock);
        return signr;
 }
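get_signal_to_deliver() is driven from each architecture's return-to-user
path; a minimal sketch of that consumer, with handle_signal() standing in
for the arch-specific signal-frame setup (named here only for illustration):

    /* Sketch: the per-arch do_signal() loop. */
    static void do_signal(struct pt_regs *regs)
    {
            struct k_sigaction ka;
            siginfo_t info;
            int signr;

            signr = get_signal_to_deliver(&info, &ka, regs, NULL);
            if (signr > 0)
                    handle_signal(signr, &info, &ka, regs);
            /* signr == 0: no signal to deliver, just return to user */
    }
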
 
+void exit_signals(struct task_struct *tsk)
+{
+       int group_stop = 0;
+       struct task_struct *t;
+
+       if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
+               tsk->flags |= PF_EXITING;
+               return;
+       }
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       /*
+        * From now this task is not visible for group-wide signals,
+        * see wants_signal(), do_signal_stop().
+        */
+       tsk->flags |= PF_EXITING;
+       if (!signal_pending(tsk))
+               goto out;
+
+       /*
+        * It could be that __group_complete_signal() chose us to
+        * notify about a group-wide signal. Another thread should be
+        * woken now to take the signal since we will not.
+        */
+       for (t = tsk; (t = next_thread(t)) != tsk; )
+               if (!signal_pending(t) && !(t->flags & PF_EXITING))
+                       recalc_sigpending_and_wake(t);
+
+       if (unlikely(tsk->signal->group_stop_count) &&
+                       !--tsk->signal->group_stop_count) {
+               tsk->signal->flags = SIGNAL_STOP_STOPPED;
+               group_stop = 1;
+       }
+out:
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(tsk, CLD_STOPPED);
+               read_unlock(&tasklist_lock);
+       }
+}
+
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(kill_pg);
-EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(ptrace_notify);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
 EXPORT_SYMBOL(sigprocmask);
@@ -2074,10 +1989,11 @@ long do_no_restart_syscall(struct restart_block *param)
 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 {
        int error;
-       sigset_t old_block;
 
        spin_lock_irq(&current->sighand->siglock);
-       old_block = current->blocked;
+       if (oldset)
+               *oldset = current->blocked;
+
        error = 0;
        switch (how) {
        case SIG_BLOCK:
@@ -2094,8 +2010,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-       if (oldset)
-               *oldset = old_block;
+
        return error;
 }
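A small in-kernel usage sketch of the function above (the critical region is
hypothetical; siginitset() and the SIG_BLOCK/SIG_SETMASK pairing are real):

    /* Sketch: block SIGCHLD across a region, then restore the
     * caller's previous mask. */
    static void block_sigchld_region(void)
    {
            sigset_t blocked, oldset;

            siginitset(&blocked, sigmask(SIGCHLD));
            sigprocmask(SIG_BLOCK, &blocked, &oldset);

            /* ... SIGCHLD stays pending here ... */

            sigprocmask(SIG_SETMASK, &oldset, NULL);
    }
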
 
@@ -2179,6 +2094,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
+        * Please remember to update the signalfd_copyinfo() function
+        * inside fs/signalfd.c too, in case siginfo_t changes.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
@@ -2282,7 +2199,6 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 
                        timeout = schedule_timeout_interruptible(timeout);
 
-                       try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
@@ -2308,48 +2224,51 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 }
 
 asmlinkage long
-sys_kill(int pid, int sig)
+sys_kill(pid_t pid, int sig)
 {
        struct siginfo info;
 
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
-       info.si_pid = current->tgid;
-       info.si_uid = current->uid;
+       info.si_pid = task_tgid_vnr(current);
+       info.si_uid = current_uid();
 
        return kill_something_info(sig, &info, pid);
 }
 
-static int do_tkill(int tgid, int pid, int sig)
+static int do_tkill(pid_t tgid, pid_t pid, int sig)
 {
        int error;
        struct siginfo info;
        struct task_struct *p;
+       unsigned long flags;
 
        error = -ESRCH;
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
-       info.si_pid = current->tgid;
-       info.si_uid = current->uid;
+       info.si_pid = task_tgid_vnr(current);
+       info.si_uid = current_uid();
 
-       read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       if (p && (tgid <= 0 || p->tgid == tgid)) {
+       rcu_read_lock();
+       p = find_task_by_vpid(pid);
+       if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe.  No signal is actually delivered.
+                *
+                * If lock_task_sighand() fails we pretend the task dies
+                * after receiving the signal. The window is tiny, and the
+                * signal is private anyway.
                 */
-               if (!error && sig && p->sighand) {
-                       spin_lock_irq(&p->sighand->siglock);
-                       handle_stop_signal(sig, p);
+               if (!error && sig && lock_task_sighand(p, &flags)) {
                        error = specific_send_sig_info(sig, &info, p);
-                       spin_unlock_irq(&p->sighand->siglock);
+                       unlock_task_sighand(p, &flags);
                }
        }
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        return error;
 }
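From userspace, the tgid check implemented above is what makes tgkill(2) safe
against tid reuse; an illustrative raw-syscall wrapper:

    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Illustrative only: direct a signal at one thread; the kernel
     * returns -ESRCH if tid no longer belongs to tgid. */
    static int signal_one_thread(pid_t tgid, pid_t tid, int sig)
    {
            return syscall(SYS_tgkill, tgid, tid, sig);
    }
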
@@ -2360,11 +2279,11 @@ static int do_tkill(int tgid, int pid, int sig)
  *  @pid: the PID of the thread
  *  @sig: signal to be sent
  *
- *  This syscall also checks the tgid and returns -ESRCH even if the PID
+ *  This syscall also checks the @tgid and returns -ESRCH even if the PID
  *  exists but it's not belonging to the target process anymore. This
  *  method solves the problem of threads exiting and PIDs getting reused.
  */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
 {
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
@@ -2377,7 +2296,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
-sys_tkill(int pid, int sig)
+sys_tkill(pid_t pid, int sig)
 {
        /* This is only valid for single tasks */
        if (pid <= 0)
@@ -2387,7 +2306,7 @@ sys_tkill(int pid, int sig)
 }
 
 asmlinkage long
-sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
+sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
 {
        siginfo_t info;
 
@@ -2404,30 +2323,25 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
        return kill_proc_info(sig, &info, pid);
 }
 
-int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
+       struct task_struct *t = current;
        struct k_sigaction *k;
+       sigset_t mask;
 
        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;
 
-       k = &current->sighand->action[sig-1];
+       k = &t->sighand->action[sig-1];
 
        spin_lock_irq(&current->sighand->siglock);
-       if (signal_pending(current)) {
-               /*
-                * If there might be a fatal signal pending on multiple
-                * threads, make sure we take it before changing the action.
-                */
-               spin_unlock_irq(&current->sighand->siglock);
-               return -ERESTARTNOINTR;
-       }
-
        if (oact)
                *oact = *k;
 
        if (act) {
+               sigdelsetmask(&act->sa.sa_mask,
+                             sigmask(SIGKILL) | sigmask(SIGSTOP));
+               *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
@@ -2439,36 +2353,15 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 */
-               if (act->sa.sa_handler == SIG_IGN ||
-                   (act->sa.sa_handler == SIG_DFL &&
-                    sig_kernel_ignore(sig))) {
-                       /*
-                        * This is a fairly rare case, so we only take the
-                        * tasklist_lock once we're sure we'll need it.
-                        * Now we must do this little unlock and relock
-                        * dance to maintain the lock hierarchy.
-                        */
-                       struct task_struct *t = current;
-                       spin_unlock_irq(&t->sighand->siglock);
-                       read_lock(&tasklist_lock);
-                       spin_lock_irq(&t->sighand->siglock);
-                       *k = *act;
-                       sigdelsetmask(&k->sa.sa_mask,
-                                     sigmask(SIGKILL) | sigmask(SIGSTOP));
-                       rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+               if (sig_handler_ignored(sig_handler(t, sig), sig)) {
+                       sigemptyset(&mask);
+                       sigaddset(&mask, sig);
+                       rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
-                               rm_from_queue(sigmask(sig), &t->pending);
-                               recalc_sigpending_tsk(t);
+                               rm_from_queue_full(&mask, &t->pending);
                                t = next_thread(t);
                        } while (t != current);
-                       spin_unlock_irq(&current->sighand->siglock);
-                       read_unlock(&tasklist_lock);
-                       return 0;
                }
-
-               *k = *act;
-               sigdelsetmask(&k->sa.sa_mask,
-                             sigmask(SIGKILL) | sigmask(SIGSTOP));
        }
 
        spin_unlock_irq(&current->sighand->siglock);
@@ -2674,6 +2567,7 @@ sys_signal(int sig, __sighandler_t handler)
 
        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+       sigemptyset(&new_sa.sa.sa_mask);
 
        ret = do_sigaction(sig, &new_sa, &old_sa);
 
@@ -2693,11 +2587,38 @@ sys_pause(void)
 
 #endif
 
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+       sigset_t newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, unewset, sizeof(newset)))
+               return -EFAULT;
+       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+       spin_lock_irq(&current->sighand->siglock);
+       current->saved_sigmask = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_restore_sigmask();
+       return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
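The saved_sigmask/set_restore_sigmask() dance above is what gives userspace
the classic race-free wait; an illustrative userspace counterpart:

    #include <signal.h>

    static volatile sig_atomic_t got_sigusr1;

    /* Illustrative only: block the signal, test the flag, then
     * atomically open the mask and sleep -- the race-free pattern
     * sys_rt_sigsuspend() exists to support. */
    static void wait_for_sigusr1(void)
    {
            sigset_t block, old;

            sigemptyset(&block);
            sigaddset(&block, SIGUSR1);
            sigprocmask(SIG_BLOCK, &block, &old);

            while (!got_sigusr1)
                    sigsuspend(&old);       /* old mask restored on return */

            sigprocmask(SIG_SETMASK, &old, NULL);
    }
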
+
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       return NULL;
+}
+
 void __init signals_init(void)
 {
-       sigqueue_cachep =
-               kmem_cache_create("sigqueue",
-                                 sizeof(struct sigqueue),
-                                 __alignof__(struct sigqueue),
-                                 SLAB_PANIC, NULL, NULL);
+       sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
 }
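KMEM_CACHE() (include/linux/slab.h) derives the cache name, size and
alignment from the struct type; in kernels of this vintage it expands to the
call the old code spelled out, minus the since-removed destructor argument:

    /* Roughly what KMEM_CACHE(sigqueue, SLAB_PANIC) expands to: */
    sigqueue_cachep = kmem_cache_create("sigqueue",
                                        sizeof(struct sigqueue),
                                        __alignof__(struct sigqueue),
                                        SLAB_PANIC, NULL);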