diff --git a/kernel/signal.c b/kernel/signal.c
index d7c7f3c..dbd7fe0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -159,6 +159,10 @@ void recalc_sigpending(void)
 
 /* Given the mask, find the first available signal that should be serviced. */
 
+#define SYNCHRONOUS_MASK \
+       (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
+        sigmask(SIGTRAP) | sigmask(SIGFPE))
+
 int next_signal(struct sigpending *pending, sigset_t *mask)
 {
        unsigned long i, *s, *m, x;
@@ -166,26 +170,39 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
 
        s = pending->signal.sig;
        m = mask->sig;
+
+       /*
+        * Handle the first word specially: it contains the
+        * synchronous signals that need to be dequeued first.
+        */
+       x = *s &~ *m;
+       if (x) {
+               if (x & SYNCHRONOUS_MASK)
+                       x &= SYNCHRONOUS_MASK;
+               sig = ffz(~x) + 1;
+               return sig;
+       }
+
        switch (_NSIG_WORDS) {
        default:
-               for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
-                       if ((x = *s &~ *m) != 0) {
-                               sig = ffz(~x) + i*_NSIG_BPW + 1;
-                               break;
-                       }
+               for (i = 1; i < _NSIG_WORDS; ++i) {
+                       x = *++s &~ *++m;
+                       if (!x)
+                               continue;
+                       sig = ffz(~x) + i*_NSIG_BPW + 1;
+                       break;
+               }
                break;
 
-       case 2: if ((x = s[0] &~ m[0]) != 0)
-                       sig = 1;
-               else if ((x = s[1] &~ m[1]) != 0)
-                       sig = _NSIG_BPW + 1;
-               else
+       case 2:
+               x = s[1] &~ m[1];
+               if (!x)
                        break;
-               sig += ffz(~x);
+               sig = ffz(~x) + _NSIG_BPW + 1;
                break;
 
-       case 1: if ((x = *s &~ *m) != 0)
-                       sig = ffz(~x) + 1;
+       case 1:
+               /* Nothing to do */
                break;
        }
 
@@ -218,17 +235,17 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
        struct user_struct *user;
 
        /*
-        * We won't get problems with the target's UID changing under us
-        * because changing it requires RCU be used, and if t != current, the
-        * caller must be holding the RCU readlock (by way of a spinlock) and
-        * we use RCU protection here
+        * Protect access to @t credentials. This can go away when all
+        * callers hold rcu read lock.
         */
+       rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
+       rcu_read_unlock();
 
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
-                       t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
+                       task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
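
Two independent cleanups in __sigqueue_alloc(). The credential dereference now sits inside rcu_read_lock()/rcu_read_unlock(): __task_cred() returns an RCU-protected pointer and, as the new comment notes, not every caller holds the RCU read lock yet. The open-coded rlim_cur access is replaced by the task_rlimit() helper, which reads the task's current soft limit for the given resource; roughly (a sketch of the helper's effect, not its verbatim definition):

static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                        unsigned int limit)
{
        /* current (soft) limit of @limit for @tsk */
        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
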
@@ -979,7 +996,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
                for (i = 0; i < 16; i++) {
                        unsigned char insn;
 
-                       __get_user(insn, (unsigned char *)(regs->ip + i));
+                       if (get_user(insn, (unsigned char *)(regs->ip + i)))
+                               break;
                        printk("%02x ", insn);
                }
        }
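
__get_user() skips the access_ok() check and its return value was being ignored, so when a fetch faulted the loop happily printed whatever stale value was left in insn. Checking get_user() and breaking out stops the code dump at the first unreadable byte. A user-space analogue of the fixed loop, using process_vm_readv() on the current process as a stand-in for get_user() (illustrative only):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

static void dump_code(unsigned char *ip)
{
        for (int i = 0; i < 16; i++) {
                unsigned char insn;
                struct iovec local  = { &insn, 1 };
                struct iovec remote = { ip + i, 1 };

                /* like get_user(): a failed read means stop, not print junk */
                if (process_vm_readv(getpid(), &local, 1, &remote, 1, 0) != 1)
                        break;
                printf("%02x ", insn);
        }
        printf("\n");
}

int main(void)
{
        dump_code((unsigned char *)dump_code);  /* readable code bytes */
        dump_code((unsigned char *)16);         /* faults on the first byte */
        return 0;
}
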
@@ -1062,12 +1080,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        return ret;
 }
 
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-       force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1185,11 +1197,12 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
        int ret = -EINVAL;
        struct task_struct *p;
        const struct cred *pcred;
+       unsigned long flags;
 
        if (!valid_signal(sig))
                return ret;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
@@ -1205,14 +1218,16 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;
-       if (sig && p->sighand) {
-               unsigned long flags;
-               spin_lock_irqsave(&p->sighand->siglock, flags);
-               ret = __send_signal(sig, info, p, 1, 0);
-               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+       if (sig) {
+               if (lock_task_sighand(p, &flags)) {
+                       ret = __send_signal(sig, info, p, 1, 0);
+                       unlock_task_sighand(p, &flags);
+               } else
+                       ret = -ESRCH;
        }
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
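
pid_task() only needs the RCU read lock, so the tasklist_lock reader can go away. lock_task_sighand() replaces the unlocked p->sighand test plus the open-coded spin_lock_irqsave(): it returns NULL once the task's signal state has been released and otherwise takes ->siglock, so the sighand cannot disappear between the check and the use. The resulting lookup-and-lock pattern, sketched as a standalone helper inside signal.c (illustrative, not the committed code):

static int sketch_send_sig_pid(struct pid *pid, struct siginfo *info, int sig)
{
        struct task_struct *p;
        unsigned long flags;
        int ret = -ESRCH;

        rcu_read_lock();                        /* keeps *p valid after lookup */
        p = pid_task(pid, PIDTYPE_PID);
        if (p && lock_task_sighand(p, &flags)) {
                /* ->siglock is held and the sighand is known to be alive */
                ret = __send_signal(sig, info, p, 1, 0);
                unlock_task_sighand(p, &flags);
        }
        rcu_read_unlock();
        return ret;
}
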
@@ -1846,11 +1861,6 @@ relock:
 
        for (;;) {
                struct k_sigaction *ka;
-
-               if (unlikely(signal->group_stop_count > 0) &&
-                   do_signal_stop(0))
-                       goto relock;
-
                /*
                 * Tracing can induce an artifical signal and choose sigaction.
                 * The return value in @signr determines the default action,
@@ -1862,6 +1872,10 @@ relock:
                if (unlikely(signr != 0))
                        ka = return_ka;
                else {
+                       if (unlikely(signal->group_stop_count > 0) &&
+                           do_signal_stop(0))
+                               goto relock;
+
                        signr = dequeue_signal(current, &current->blocked,
                                               info);
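
The group-stop check moves from the top of the for (;;) loop into the signr == 0 branch, so a thread only considers joining a pending group stop on the path that actually dequeues a signal, after the tracer's decision has been consulted; presumably this keeps a tracehook-supplied signr from being set aside while the thread stops. An abstract user-space model of that ordering (plain C, made-up names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* traced_signr == 0 means "the tracer picked nothing";
 * *group_stop mimics signal->group_stop_count > 0. */
static int pick_action(int traced_signr, bool *group_stop)
{
        int signr = traced_signr;

        if (signr == 0) {
                /* only join a group stop when tracing supplied no signal */
                if (*group_stop) {
                        *group_stop = false;
                        return 0;               /* "stopped; go around again" */
                }
                signr = 15;                     /* pretend dequeue found SIGTERM */
        }
        return signr;
}

int main(void)
{
        bool group_stop = true;

        /* The tracer chose SIGTRAP (5): it is reported, not deferred. */
        printf("delivered %d\n", pick_action(5, &group_stop));
        return 0;
}
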