[PATCH] signal: RCU-safe ->sighand locking, SEND_SIG_* cookies and signal-path cleanups
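
Summary of the kernel/signal.c changes in this (cumulative) diff:

 - Name the magic siginfo cookies: (void *)0/1/2 become SEND_SIG_NOINFO,
   SEND_SIG_PRIV and SEND_SIG_FORCED, tested via is_si_special(), and
   force_sig_specific() becomes a trivial wrapper around force_sig_info().
 - Remove __exit_sighand()/exit_sighand()/__exit_signal()/exit_signal();
   exit-time signal teardown moves out of this file, and flush_sigqueue()
   is made non-static for it.
 - Add lock_task_sighand() so signal senders can pin ->sighand under
   rcu_read_lock() instead of taking tasklist_lock; kill_proc_info() now
   takes tasklist_lock only when sig_needs_tasklist() (i.e. for SIGCONT),
   and send_sigqueue() runs under RCU alone.
 - Add rm_from_queue_full() so do_sigaction() can flush ignored signals
   with a full sigset_t instead of only the first word of the mask.
 - Fold sys_tkill()/sys_tgkill() into a common do_tkill(), simplify
   do_signal_stop() (no more drop-and-retake of siglock), and drop the
   SI_TIMER si_sys_private handling from the send paths.
 - Add the generic sys_rt_sigsuspend() built on TIF_RESTORE_SIGMASK.
 - Assorted cleanups: BUG_ON() conversions, include trimming,
   try_to_freeze() moved into get_signal_to_deliver(), may_ptrace_stop()
   factored out of ptrace_stop(), do_notify_parent_cldstop() computing
   the ptrace case itself.

For reference, the special cookies keep the old small-integer encodings.
A sketch of the definitions (assumed to live in a shared header in this
series; the encodings follow from the case labels in send_signal() below):

        #define SEND_SIG_NOINFO ((struct siginfo *) 0)
        #define SEND_SIG_PRIV   ((struct siginfo *) 1)
        #define SEND_SIG_FORCED ((struct siginfo *) 2)
        /* true for all three special cookies above */
        #define is_si_special(info) ((info) <= SEND_SIG_FORCED)

The new locking helper is used like this (sketch only, mirroring the new
group_send_sig_info(); unlock_task_sighand() is used but not defined in
this file and is assumed to be the matching unlock helper from this
series, and send_sig_to_task() is just an illustrative name):

        static int send_sig_to_task(int sig, struct siginfo *info,
                                    struct task_struct *p)
        {
                unsigned long flags;
                int ret = -ESRCH;

                rcu_read_lock();
                /* pins ->sighand; returns NULL if the task is already gone */
                if (lock_task_sighand(p, &flags)) {
                        ret = __group_send_sig_info(sig, info, p);
                        unlock_task_sighand(p, &flags);
                }
                rcu_read_unlock();
                return ret;
        }
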
diff --git a/kernel/signal.c b/kernel/signal.c
index 8d6e64d..52adf53 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
-#include <linux/posix-timers.h>
 #include <linux/signal.h>
-#include <linux/audit.h>
+#include <linux/capability.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/siginfo.h>
+#include "audit.h"     /* audit_signal_info() */
 
 /*
  * SLAB caches for signal bits.
@@ -146,6 +146,8 @@ static kmem_cache_t *sigqueue_cachep;
 #define sig_kernel_stop(sig) \
                (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
 
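+/* only SIGCONT still needs tasklist_lock in kill_proc_info() */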
+#define sig_needs_tasklist(sig)        ((sig) == SIGCONT)
+
 #define sig_user_defined(t, signr) \
        (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
         ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
@@ -282,7 +284,7 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
        return(q);
 }
 
-static inline void __sigqueue_free(struct sigqueue *q)
+static void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
@@ -291,7 +293,7 @@ static inline void __sigqueue_free(struct sigqueue *q)
        kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
        struct sigqueue *q;
 
@@ -306,9 +308,7 @@ static void flush_sigqueue(struct sigpending *queue)
 /*
  * Flush all pending signals for a task.
  */
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
 {
        unsigned long flags;
 
@@ -320,99 +320,6 @@ flush_signals(struct task_struct *t)
 }
 
 /*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
-       struct sighand_struct * sighand = tsk->sighand;
-
-       /* Ok, we're done with the signal handlers */
-       tsk->sighand = NULL;
-       if (atomic_dec_and_test(&sighand->count))
-               kmem_cache_free(sighand_cachep, sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
-       write_lock_irq(&tasklist_lock);
-       __exit_sighand(tsk);
-       write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-       struct signal_struct * sig = tsk->signal;
-       struct sighand_struct * sighand = tsk->sighand;
-
-       if (!sig)
-               BUG();
-       if (!atomic_read(&sig->count))
-               BUG();
-       spin_lock(&sighand->siglock);
-       posix_cpu_timers_exit(tsk);
-       if (atomic_dec_and_test(&sig->count)) {
-               posix_cpu_timers_exit_group(tsk);
-               if (tsk == sig->curr_target)
-                       sig->curr_target = next_thread(tsk);
-               tsk->signal = NULL;
-               spin_unlock(&sighand->siglock);
-               flush_sigqueue(&sig->shared_pending);
-       } else {
-               /*
-                * If there is any task waiting for the group exit
-                * then notify it:
-                */
-               if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-                       wake_up_process(sig->group_exit_task);
-                       sig->group_exit_task = NULL;
-               }
-               if (tsk == sig->curr_target)
-                       sig->curr_target = next_thread(tsk);
-               tsk->signal = NULL;
-               /*
-                * Accumulate here the counters for all threads but the
-                * group leader as they die, so they can be added into
-                * the process-wide totals when those are taken.
-                * The group leader stays around as a zombie as long
-                * as there are other threads.  When it gets reaped,
-                * the exit.c code will add its counts into these totals.
-                * We won't ever get here for the group leader, since it
-                * will have been the last reference on the signal_struct.
-                */
-               sig->utime = cputime_add(sig->utime, tsk->utime);
-               sig->stime = cputime_add(sig->stime, tsk->stime);
-               sig->min_flt += tsk->min_flt;
-               sig->maj_flt += tsk->maj_flt;
-               sig->nvcsw += tsk->nvcsw;
-               sig->nivcsw += tsk->nivcsw;
-               sig->sched_time += tsk->sched_time;
-               spin_unlock(&sighand->siglock);
-               sig = NULL;     /* Marker for below.  */
-       }
-       clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-       flush_sigqueue(&tsk->pending);
-       if (sig) {
-               /*
-                * We are cleaning up the signal_struct here.
-                */
-               exit_thread_group_keys(sig);
-               kmem_cache_free(signal_cachep, sig);
-       }
-}
-
-void exit_signal(struct task_struct *tsk)
-{
-       atomic_dec(&tsk->signal->live);
-
-       write_lock_irq(&tasklist_lock);
-       __exit_signal(tsk);
-       write_unlock_irq(&tasklist_lock);
-}
-
-/*
  * Flush all handlers for a task.
  */
 
@@ -465,7 +372,7 @@ unblock_all_signals(void)
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
        struct sigqueue *q, *first = NULL;
        int still_pending = 0;
@@ -513,16 +420,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
        int sig = 0;
 
-       /* SIGKILL must have priority, otherwise it is quite easy
-        * to create an unkillable process, sending sig < SIGKILL
-        * to self */
-       if (unlikely(sigismember(&pending->signal, SIGKILL))) {
-               if (!sigismember(mask, SIGKILL))
-                       sig = SIGKILL;
-       }
-
-       if (likely(!sig))
-               sig = next_signal(pending, mask);
+       sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
@@ -622,6 +520,33 @@ void signal_wake_up(struct task_struct *t, int resume)
  * Returns 1 if any signals were found.
  *
  * All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+       struct sigqueue *q, *n;
+       sigset_t m;
+
+       sigandsets(&m, mask, &s->signal);
+       if (sigisemptyset(&m))
+               return 0;
+
+       signandsets(&s->signal, &s->signal, mask);
+       list_for_each_entry_safe(q, n, &s->list, list) {
+               if (sigismember(mask, q->info.si_signo)) {
+                       list_del_init(&q->list);
+                       __sigqueue_free(q);
+               }
+       }
+       return 1;
+}
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
  */
 static int rm_from_queue(unsigned long mask, struct sigpending *s)
 {
@@ -651,8 +576,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
        if (!valid_signal(sig))
                return error;
        error = -EPERM;
-       if ((!info || ((unsigned long)info != 1 &&
-                       (unsigned long)info != 2 && SI_FROMUSER(info)))
+       if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && ((sig != SIGCONT) ||
                (current->signal->session != t->signal->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
@@ -667,9 +591,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 }
 
 /* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-                                    int to_self,
-                                    int why);
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
  * Handle magic process-wide effects of stop/continue signals.
@@ -719,7 +641,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        p->signal->group_stop_count = 0;
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
-                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
+                       do_notify_parent_cldstop(p, CLD_STOPPED);
                        spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -760,7 +682,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
-                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
+                       do_notify_parent_cldstop(p, CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
                } else {
                        /*
@@ -789,7 +711,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
-       if ((unsigned long)info == 2)
+       if (info == SEND_SIG_FORCED)
                goto out_set;
 
        /* Real-time signals must be queued if sent by sigqueue, or
@@ -801,19 +723,19 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
           pass on the info struct.  */
 
        q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-                                            ((unsigned long) info < 2 ||
+                                            (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
                list_add_tail(&q->list, &signals->list);
                switch ((unsigned long) info) {
-               case 0:
+               case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = current->pid;
                        q->info.si_uid = current->uid;
                        break;
-               case 1:
+               case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
@@ -824,20 +746,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        copy_siginfo(&q->info, info);
                        break;
                }
-       } else {
-               if (sig >= SIGRTMIN && info && (unsigned long)info != 1
-                  && info->si_code != SI_USER)
+       } else if (!is_si_special(info)) {
+               if (sig >= SIGRTMIN && info->si_code != SI_USER)
                /*
                 * Queue overflow, abort.  We may abort if the signal was rt
                 * and sent by user using something other than kill().
                 */
                        return -EAGAIN;
-               if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
-                       /*
-                        * Set up a return to indicate that we dropped 
-                        * the signal.
-                        */
-                       ret = info->si_sys_private;
        }
 
 out_set:
@@ -854,16 +769,9 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
        int ret = 0;
 
-       if (!irqs_disabled())
-               BUG();
+       BUG_ON(!irqs_disabled());
        assert_spin_locked(&t->sighand->siglock);
 
-       if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-               /*
-                * Set up a return to indicate that we dropped the signal.
-                */
-               ret = info->si_sys_private;
-
        /* Short-circuit ignored signals.  */
        if (sig_ignored(t, sig))
                goto out;
@@ -893,11 +801,13 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        int ret;
 
        spin_lock_irqsave(&t->sighand->siglock, flags);
-       if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
+       if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
                t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
+       }
+       if (sigismember(&t->blocked, sig)) {
                sigdelset(&t->blocked, sig);
-               recalc_sigpending_tsk(t);
        }
+       recalc_sigpending_tsk(t);
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -907,15 +817,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 void
 force_sig_specific(int sig, struct task_struct *t)
 {
-       unsigned long int flags;
-
-       spin_lock_irqsave(&t->sighand->siglock, flags);
-       if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
-               t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-       sigdelset(&t->blocked, sig);
-       recalc_sigpending_tsk(t);
-       specific_send_sig_info(sig, (void *)2, t);
-       spin_unlock_irqrestore(&t->sighand->siglock, flags);
+       force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
 /*
@@ -966,7 +868,6 @@ __group_complete_signal(int sig, struct task_struct *p)
                if (t == NULL)
                        /* restart balancing at this thread */
                        t = p->signal->curr_target = p;
-               BUG_ON(t->tgid != p->tgid);
 
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
@@ -1050,12 +951,6 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        assert_spin_locked(&p->sighand->siglock);
        handle_stop_signal(sig, p);
 
-       if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-               /*
-                * Set up a return to indicate that we dropped the signal.
-                */
-               ret = info->si_sys_private;
-
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig))
                return ret;
@@ -1108,25 +1003,46 @@ void zap_other_threads(struct task_struct *p)
                if (t != p->group_leader)
                        t->exit_signal = -1;
 
+               /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                signal_wake_up(t, 1);
        }
 }
 
 /*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+       struct sighand_struct *sighand;
+
+       for (;;) {
+               sighand = rcu_dereference(tsk->sighand);
+               if (unlikely(sighand == NULL))
+                       break;
+
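+               /*
+                * ->sighand may be switched (exec's de_thread()) or freed
+                * (exit) between the dereference and the lock; recheck
+                * under the lock and retry if we raced.
+                */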
+               spin_lock_irqsave(&sighand->siglock, *flags);
+               if (likely(sighand == tsk->sighand))
+                       break;
+               spin_unlock_irqrestore(&sighand->siglock, *flags);
+       }
+
+       return sighand;
+}
+
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
        unsigned long flags;
        int ret;
 
        ret = check_kill_permission(sig, info, p);
-       if (!ret && sig && p->sighand) {
-               spin_lock_irqsave(&p->sighand->siglock, flags);
-               ret = __group_send_sig_info(sig, info, p);
-               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+       if (!ret && sig) {
+               ret = -ESRCH;
+               if (lock_task_sighand(p, &flags)) {
+                       ret = __group_send_sig_info(sig, info, p);
+                       unlock_task_sighand(p, &flags);
+               }
        }
 
        return ret;
@@ -1171,14 +1087,21 @@ int
 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
        int error;
+       int acquired_tasklist_lock = 0;
        struct task_struct *p;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
+       if (unlikely(sig_needs_tasklist(sig))) {
+               read_lock(&tasklist_lock);
+               acquired_tasklist_lock = 1;
+       }
        p = find_task_by_pid(pid);
        error = -ESRCH;
        if (p)
                error = group_send_sig_info(sig, info, p);
-       read_unlock(&tasklist_lock);
+       if (unlikely(acquired_tasklist_lock))
+               read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return error;
 }
 
@@ -1198,8 +1121,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
                ret = -ESRCH;
                goto out_unlock;
        }
-       if ((!info || ((unsigned long)info != 1 &&
-                       (unsigned long)info != 2 && SI_FROMUSER(info)))
+       if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && (euid != p->suid) && (euid != p->uid)
            && (uid != p->suid) && (uid != p->uid)) {
                ret = -EPERM;
@@ -1285,10 +1207,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        return ret;
 }
 
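+/* map the kill_proc()/send_sig() "priv" flag onto the new cookies */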
+#define __si_special(priv) \
+       ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
+
 int
 send_sig(int sig, struct task_struct *p, int priv)
 {
-       return send_sig_info(sig, (void*)(long)(priv != 0), p);
+       return send_sig_info(sig, __si_special(priv), p);
 }
 
 /*
@@ -1308,7 +1233,7 @@ send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 void
 force_sig(int sig, struct task_struct *p)
 {
-       force_sig_info(sig, (void*)1L, p);
+       force_sig_info(sig, SEND_SIG_PRIV, p);
 }
 
 /*
@@ -1333,13 +1258,13 @@ force_sigsegv(int sig, struct task_struct *p)
 int
 kill_pg(pid_t pgrp, int sig, int priv)
 {
-       return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
+       return kill_pg_info(sig, __si_special(priv), pgrp);
 }
 
 int
 kill_proc(pid_t pid, int sig, int priv)
 {
-       return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
+       return kill_proc_info(sig, __si_special(priv), pid);
 }
 
 /*
@@ -1382,29 +1307,34 @@ void sigqueue_free(struct sigqueue *q)
        __sigqueue_free(q);
 }
 
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
        unsigned long flags;
        int ret = 0;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-       read_lock(&tasklist_lock);
 
-       if (unlikely(p->flags & PF_EXITING)) {
+       /*
+        * The RCU-based delayed sighand destruction makes it possible to
+        * run this without tasklist_lock held. The task struct itself
+        * cannot go away, as create_timer did get_task_struct().
+        *
+        * We return -1 when the task is marked exiting, so
+        * posix_timer_event() can redirect it to the group leader.
+        */
+       rcu_read_lock();
+
+       if (unlikely(!lock_task_sighand(p, &flags))) {
                ret = -1;
                goto out_err;
        }
 
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
-               if (q->info.si_code != SI_TIMER)
-                       BUG();
+               BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }
@@ -1420,9 +1350,9 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                signal_wake_up(p, sig == SIGKILL);
 
 out:
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
+       unlock_task_sighand(p, &flags);
 out_err:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        return ret;
 }
@@ -1434,7 +1364,9 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
        int ret = 0;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
        read_lock(&tasklist_lock);
+       /* Since it_lock is held, p->sighand cannot be NULL. */
        spin_lock_irqsave(&p->sighand->siglock, flags);
        handle_stop_signal(sig, p);
 
@@ -1450,8 +1382,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                 * the overrun count.  Other uses should not try to
                 * send the signal multiple times.
                 */
-               if (q->info.si_code != SI_TIMER)
-                       BUG();
+               BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        } 
@@ -1468,7 +1399,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 out:
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        read_unlock(&tasklist_lock);
-       return(ret);
+       return ret;
 }
 
 /*
@@ -1522,7 +1453,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 
        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
-       if (sig == SIGCHLD &&
+       if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
@@ -1550,14 +1481,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
 
-       if (to_self)
+       if (tsk->ptrace & PT_PTRACED)
                parent = tsk->parent;
        else {
                tsk = tsk->group_leader;
@@ -1600,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int w
        spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
+static inline int may_ptrace_stop(void)
+{
+       if (unlikely(!(current->ptrace & PT_PTRACED)))
+               return 0;
+
+       if (unlikely(current->parent == current->real_parent &&
+                   (current->ptrace & PT_ATTACHED)))
+               return 0;
+
+       if (unlikely(current->signal == current->parent->signal) &&
+           unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+               return 0;
+
+       /*
+        * Are we in the middle of do_coredump?
+        * If so and our tracer is also part of the coredump stopping
+        * is a deadlock situation, and pointless because our tracer
+        * is dead so don't allow us to stop.
+        * If SIGKILL was already sent before the caller unlocked
+        * ->siglock we must see ->core_waiters != 0. Otherwise it
+        * is safe to enter schedule().
+        */
+       if (unlikely(current->mm->core_waiters) &&
+           unlikely(current->mm == current->parent->mm))
+               return 0;
+
+       return 1;
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1626,13 +1586,10 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        /* Let the debugger run.  */
        set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
+       try_to_freeze();
        read_lock(&tasklist_lock);
-       if (likely(current->ptrace & PT_PTRACED) &&
-           likely(current->parent != current->real_parent ||
-                  !(current->ptrace & PT_ATTACHED)) &&
-           (likely(current->parent->signal != current->signal) ||
-            !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-               do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
+       if (may_ptrace_stop()) {
+               do_notify_parent_cldstop(current, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
@@ -1681,25 +1638,17 @@ void ptrace_notify(int exit_code)
 static void
 finish_stop(int stop_count)
 {
-       int to_self;
-
        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
-       if (stop_count < 0 || (current->ptrace & PT_PTRACED))
-               to_self = 1;
-       else if (stop_count == 0)
-               to_self = 0;
-       else
-               goto out;
-
-       read_lock(&tasklist_lock);
-       do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
-       read_unlock(&tasklist_lock);
+       if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(current, CLD_STOPPED);
+               read_unlock(&tasklist_lock);
+       }
 
-out:
        schedule();
        /*
         * Now we don't run again until continued.
@@ -1713,12 +1662,10 @@ out:
  * Returns nonzero if we've actually stopped and released the siglock.
  * Returns zero if we didn't stop and still hold the siglock.
  */
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
 {
        struct signal_struct *sig = current->signal;
-       struct sighand_struct *sighand = current->sighand;
-       int stop_count = -1;
+       int stop_count;
 
        if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
                return 0;
@@ -1728,86 +1675,37 @@ do_signal_stop(int signr)
                 * There is a group stop in progress.  We don't need to
                 * start another one.
                 */
-               signr = sig->group_exit_code;
                stop_count = --sig->group_stop_count;
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               if (stop_count == 0)
-                       sig->flags = SIGNAL_STOP_STOPPED;
-               spin_unlock_irq(&sighand->siglock);
-       }
-       else if (thread_group_empty(current)) {
-               /*
-                * Lock must be held through transition to stopped state.
-                */
-               current->exit_code = current->signal->group_exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               sig->flags = SIGNAL_STOP_STOPPED;
-               spin_unlock_irq(&sighand->siglock);
-       }
-       else {
+       } else {
                /*
                 * There is no group stop already in progress.
-                * We must initiate one now, but that requires
-                * dropping siglock to get both the tasklist lock
-                * and siglock again in the proper order.  Note that
-                * this allows an intervening SIGCONT to be posted.
-                * We need to check for that and bail out if necessary.
+                * We must initiate one now.
                 */
                struct task_struct *t;
 
-               spin_unlock_irq(&sighand->siglock);
+               sig->group_exit_code = signr;
 
-               /* signals can be posted during this window */
-
-               read_lock(&tasklist_lock);
-               spin_lock_irq(&sighand->siglock);
-
-               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+               stop_count = 0;
+               for (t = next_thread(current); t != current; t = next_thread(t))
                        /*
-                        * Another stop or continue happened while we
-                        * didn't have the lock.  We can just swallow this
-                        * signal now.  If we raced with a SIGCONT, that
-                        * should have just cleared it now.  If we raced
-                        * with another processor delivering a stop signal,
-                        * then the SIGCONT that wakes us up should clear it.
+                        * Setting state to TASK_STOPPED for a group
+                        * stop is always done with the siglock held,
+                        * so this check has no races.
                         */
-                       read_unlock(&tasklist_lock);
-                       return 0;
-               }
-
-               if (sig->group_stop_count == 0) {
-                       sig->group_exit_code = signr;
-                       stop_count = 0;
-                       for (t = next_thread(current); t != current;
-                            t = next_thread(t))
-                               /*
-                                * Setting state to TASK_STOPPED for a group
-                                * stop is always done with the siglock held,
-                                * so this check has no races.
-                                */
-                               if (!t->exit_state &&
-                                   !(t->state & (TASK_STOPPED|TASK_TRACED))) {
-                                       stop_count++;
-                                       signal_wake_up(t, 0);
-                               }
-                       sig->group_stop_count = stop_count;
-               }
-               else {
-                       /* A race with another thread while unlocked.  */
-                       signr = sig->group_exit_code;
-                       stop_count = --sig->group_stop_count;
-               }
-
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               if (stop_count == 0)
-                       sig->flags = SIGNAL_STOP_STOPPED;
-
-               spin_unlock_irq(&sighand->siglock);
-               read_unlock(&tasklist_lock);
+                       if (!t->exit_state &&
+                           !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+                               stop_count++;
+                               signal_wake_up(t, 0);
+                       }
+               sig->group_stop_count = stop_count;
        }
 
+       if (stop_count == 0)
+               sig->flags = SIGNAL_STOP_STOPPED;
+       current->exit_code = sig->group_exit_code;
+       __set_current_state(TASK_STOPPED);
+
+       spin_unlock_irq(&current->sighand->siglock);
        finish_stop(stop_count);
        return 1;
 }
@@ -1818,7 +1716,7 @@ do_signal_stop(int signr)
  * We return zero if we still hold the siglock and should look
  * for another signal without checking group_stop_count again.
  */
-static inline int handle_group_stop(void)
+static int handle_group_stop(void)
 {
        int stop_count;
 
@@ -1859,6 +1757,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
        sigset_t *mask = &current->blocked;
        int signr = 0;
 
+       try_to_freeze();
+
 relock:
        spin_lock_irq(&current->sighand->siglock);
        for (;;) {
@@ -1925,7 +1825,7 @@ relock:
                        continue;
 
                /* Init gets no signals it doesn't want.  */
-               if (current->pid == 1)
+               if (current == child_reaper)
                        continue;
 
                if (sig_kernel_stop(signr)) {
@@ -2036,10 +1936,11 @@ long do_no_restart_syscall(struct restart_block *param)
 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 {
        int error;
-       sigset_t old_block;
 
        spin_lock_irq(&current->sighand->siglock);
-       old_block = current->blocked;
+       if (oldset)
+               *oldset = current->blocked;
+
        error = 0;
        switch (how) {
        case SIG_BLOCK:
@@ -2056,8 +1957,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-       if (oldset)
-               *oldset = old_block;
+
        return error;
 }
 
@@ -2244,7 +2144,6 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 
                        timeout = schedule_timeout_interruptible(timeout);
 
-                       try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
@@ -2283,26 +2182,13 @@ sys_kill(int pid, int sig)
        return kill_something_info(sig, &info, pid);
 }
 
-/**
- *  sys_tgkill - send signal to one specific thread
- *  @tgid: the thread group ID of the thread
- *  @pid: the PID of the thread
- *  @sig: signal to be sent
- *
- *  This syscall also checks the tgid and returns -ESRCH even if the PID
- *  exists but it's not belonging to the target process anymore. This
- *  method solves the problem of threads exiting and PIDs getting reused.
- */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
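+/*
+ * Common backend for tkill/tgkill: tgid <= 0 skips the thread-group
+ * check, preserving sys_tkill() semantics.
+ */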
+static int do_tkill(int tgid, int pid, int sig)
 {
-       struct siginfo info;
        int error;
+       struct siginfo info;
        struct task_struct *p;
 
-       /* This is only valid for single tasks */
-       if (pid <= 0 || tgid <= 0)
-               return -EINVAL;
-
+       error = -ESRCH;
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
@@ -2311,8 +2197,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
 
        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p && (p->tgid == tgid)) {
+       if (p && (tgid <= 0 || p->tgid == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
@@ -2326,47 +2211,40 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
                }
        }
        read_unlock(&tasklist_lock);
+
        return error;
 }
 
+/**
+ *  sys_tgkill - send signal to one specific thread
+ *  @tgid: the thread group ID of the thread
+ *  @pid: the PID of the thread
+ *  @sig: signal to be sent
+ *
+ *  This syscall also checks the tgid and returns -ESRCH even if the PID
+ *  exists but no longer belongs to the target process. This method
+ *  solves the problem of threads exiting and PIDs getting reused.
+ */
+asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+{
+       /* This is only valid for single tasks */
+       if (pid <= 0 || tgid <= 0)
+               return -EINVAL;
+
+       return do_tkill(tgid, pid, sig);
+}
+
 /*
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
 sys_tkill(int pid, int sig)
 {
-       struct siginfo info;
-       int error;
-       struct task_struct *p;
-
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;
 
-       info.si_signo = sig;
-       info.si_errno = 0;
-       info.si_code = SI_TKILL;
-       info.si_pid = current->tgid;
-       info.si_uid = current->uid;
-
-       read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p) {
-               error = check_kill_permission(sig, &info, p);
-               /*
-                * The null signal is a permissions and process existence
-                * probe.  No signal is actually delivered.
-                */
-               if (!error && sig && p->sighand) {
-                       spin_lock_irq(&p->sighand->siglock);
-                       handle_stop_signal(sig, p);
-                       error = specific_send_sig_info(sig, &info, p);
-                       spin_unlock_irq(&p->sighand->siglock);
-               }
-       }
-       read_unlock(&tasklist_lock);
-       return error;
+       return do_tkill(0, pid, sig);
 }
 
 asmlinkage long
@@ -2387,10 +2265,10 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
        return kill_proc_info(sig, &info, pid);
 }
 
-int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
        struct k_sigaction *k;
+       sigset_t mask;
 
        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;
@@ -2411,6 +2289,9 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                *oact = *k;
 
        if (act) {
+               sigdelsetmask(&act->sa.sa_mask,
+                             sigmask(SIGKILL) | sigmask(SIGSTOP));
+               *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
@@ -2423,35 +2304,17 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                 *   be discarded, whether or not it is blocked"
                 */
                if (act->sa.sa_handler == SIG_IGN ||
-                   (act->sa.sa_handler == SIG_DFL &&
-                    sig_kernel_ignore(sig))) {
-                       /*
-                        * This is a fairly rare case, so we only take the
-                        * tasklist_lock once we're sure we'll need it.
-                        * Now we must do this little unlock and relock
-                        * dance to maintain the lock hierarchy.
-                        */
+                  (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
                        struct task_struct *t = current;
-                       spin_unlock_irq(&t->sighand->siglock);
-                       read_lock(&tasklist_lock);
-                       spin_lock_irq(&t->sighand->siglock);
-                       *k = *act;
-                       sigdelsetmask(&k->sa.sa_mask,
-                                     sigmask(SIGKILL) | sigmask(SIGSTOP));
-                       rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+                       sigemptyset(&mask);
+                       sigaddset(&mask, sig);
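+                       /* sigmask(sig) can't represent rt signals; use a full sigset_t */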
+                       rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
-                               rm_from_queue(sigmask(sig), &t->pending);
+                               rm_from_queue_full(&mask, &t->pending);
                                recalc_sigpending_tsk(t);
                                t = next_thread(t);
                        } while (t != current);
-                       spin_unlock_irq(&current->sighand->siglock);
-                       read_unlock(&tasklist_lock);
-                       return 0;
                }
-
-               *k = *act;
-               sigdelsetmask(&k->sa.sa_mask,
-                             sigmask(SIGKILL) | sigmask(SIGSTOP));
        }
 
        spin_unlock_irq(&current->sighand->siglock);
@@ -2657,6 +2520,7 @@ sys_signal(int sig, __sighandler_t handler)
 
        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+       sigemptyset(&new_sa.sa.sa_mask);
 
        ret = do_sigaction(sig, &new_sa, &old_sa);
 
@@ -2676,6 +2540,32 @@ sys_pause(void)
 
 #endif
 
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+       sigset_t newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, unewset, sizeof(newset)))
+               return -EFAULT;
+       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+       spin_lock_irq(&current->sighand->siglock);
+       current->saved_sigmask = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
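+       /*
+        * Sleep with the temporary mask in place; TIF_RESTORE_SIGMASK
+        * tells the signal-delivery path to restore ->saved_sigmask
+        * once a signal handler has been set up.
+        */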
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+
 void __init signals_init(void)
 {
        sigqueue_cachep =