diff --git a/kernel/signal.c b/kernel/signal.c
index c6d7a24..906ae5a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/signalfd.h>
+#include <linux/ratelimit.h>
 #include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
-#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/signal.h>
 
 #include <asm/param.h>
 #include <asm/uaccess.h>
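
The CREATE_TRACE_POINTS switch above follows the standard tracepoint convention: exactly one translation unit defines the macro before including the event header, which expands the TRACE_EVENT() declarations in <trace/events/signal.h> into actual tracepoint definitions; every other user includes the header bare and sees declarations only. A minimal sketch of the pattern:

    /* In exactly one .c file -- here, kernel/signal.c: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/signal.h>   /* emits the tracepoint bodies */

    /* In any other file that calls trace_signal_*(): */
    #include <trace/events/signal.h>   /* declarations only */
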
@@ -41,6 +43,8 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+int print_fatal_signals __read_mostly;
+
 static void __user *sig_handler(struct task_struct *t, int sig)
 {
        return t->sighand->action[sig - 1].sa.sa_handler;
@@ -155,62 +159,98 @@ void recalc_sigpending(void)
 
 /* Given the mask, find the first available signal that should be serviced. */
 
+#define SYNCHRONOUS_MASK \
+       (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
+        sigmask(SIGTRAP) | sigmask(SIGFPE))
+
 int next_signal(struct sigpending *pending, sigset_t *mask)
 {
        unsigned long i, *s, *m, x;
        int sig = 0;
-       
+
        s = pending->signal.sig;
        m = mask->sig;
+
+       /*
+        * Handle the first word specially: it contains the
+        * synchronous signals that need to be dequeued first.
+        */
+       x = *s &~ *m;
+       if (x) {
+               if (x & SYNCHRONOUS_MASK)
+                       x &= SYNCHRONOUS_MASK;
+               sig = ffz(~x) + 1;
+               return sig;
+       }
+
        switch (_NSIG_WORDS) {
        default:
-               for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
-                       if ((x = *s &~ *m) != 0) {
-                               sig = ffz(~x) + i*_NSIG_BPW + 1;
-                               break;
-                       }
+               for (i = 1; i < _NSIG_WORDS; ++i) {
+                       x = *++s &~ *++m;
+                       if (!x)
+                               continue;
+                       sig = ffz(~x) + i*_NSIG_BPW + 1;
+                       break;
+               }
                break;
 
-       case 2: if ((x = s[0] &~ m[0]) != 0)
-                       sig = 1;
-               else if ((x = s[1] &~ m[1]) != 0)
-                       sig = _NSIG_BPW + 1;
-               else
+       case 2:
+               x = s[1] &~ m[1];
+               if (!x)
                        break;
-               sig += ffz(~x);
+               sig = ffz(~x) + _NSIG_BPW + 1;
                break;
 
-       case 1: if ((x = *s &~ *m) != 0)
-                       sig = ffz(~x) + 1;
+       case 1:
+               /* Nothing to do */
                break;
        }
-       
+
        return sig;
 }
 
+static inline void print_dropped_signal(int sig)
+{
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+       if (!print_fatal_signals)
+               return;
+
+       if (!__ratelimit(&ratelimit_state))
+               return;
+
+       printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
+                               current->comm, current->pid, sig);
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
  */
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
-                                        int override_rlimit)
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 {
        struct sigqueue *q = NULL;
        struct user_struct *user;
 
        /*
-        * We won't get problems with the target's UID changing under us
-        * because changing it requires RCU be used, and if t != current, the
-        * caller must be holding the RCU readlock (by way of a spinlock) and
-        * we use RCU protection here
+        * Protect access to @t credentials. This can go away when all
+        * callers hold rcu read lock.
         */
+       rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
+       rcu_read_unlock();
+
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
-                       t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+                       task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
+       } else {
+               print_dropped_signal(sig);
+       }
+
        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
                free_uid(user);
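
The rewritten next_signal() above handles word 0 of the pending set specially: when any synchronous (fault-generated) signal is both pending and unblocked, the candidate set is narrowed to SYNCHRONOUS_MASK before the lowest bit is chosen, so a pending SIGSEGV is dequeued ahead of, say, a pending SIGHUP. A minimal userspace sketch of that first-word selection, with the kernel's ffz(~x) emulated by __builtin_ctzl(x) (illustrative only):

    #include <signal.h>
    #include <stdio.h>

    #define SIGMASK(sig)    (1UL << ((sig) - 1))
    #define SYNC_MASK       (SIGMASK(SIGSEGV) | SIGMASK(SIGBUS) | \
                             SIGMASK(SIGILL) | SIGMASK(SIGTRAP) | \
                             SIGMASK(SIGFPE))

    static int first_word_signal(unsigned long pending, unsigned long blocked)
    {
            unsigned long x = pending & ~blocked;

            if (!x)
                    return 0;
            if (x & SYNC_MASK)              /* fault signals jump the queue */
                    x &= SYNC_MASK;
            return __builtin_ctzl(x) + 1;   /* lowest set bit -> signal number */
    }

    int main(void)
    {
            /* SIGHUP (1) and SIGSEGV (11) both pending: SIGSEGV wins. */
            printf("%d\n", first_word_signal(SIGMASK(SIGHUP) | SIGMASK(SIGSEGV), 0));
            return 0;
    }
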
@@ -400,7 +440,7 @@ still_pending:
                 */
                info->si_signo = sig;
                info->si_errno = 0;
-               info->si_code = 0;
+               info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
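
One detail of the hunk above: SI_USER is 0 in <asm-generic/siginfo.h>, so replacing the literal 0 with SI_USER changes no behavior; it simply documents that a signal whose queued info was dropped is reported as if sent by a plain kill().
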
@@ -584,6 +624,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
        return 1;
 }
 
+static inline int is_si_special(const struct siginfo *info)
+{
+       return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+       return info == SEND_SIG_NOINFO ||
+               (!is_si_special(info) && SI_FROMUSER(info));
+}
+
 /*
  * Bad permissions for sending the signal
  * - the caller must hold at least the RCU read lock
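
The pointer comparison in is_si_special() works because the special info values are small integer cookies rather than real pointers; si_fromuser() then combines that test with SI_FROMUSER(), which inspects si_code. For reference, the relevant definitions from the headers of this era (<linux/sched.h> and <asm-generic/siginfo.h>):

    #define SEND_SIG_NOINFO ((struct siginfo *) 0)
    #define SEND_SIG_PRIV   ((struct siginfo *) 1)
    #define SEND_SIG_FORCED ((struct siginfo *) 2)

    #define SI_FROMUSER(siptr)      ((siptr)->si_code <= 0)
    #define SI_FROMKERNEL(siptr)    ((siptr)->si_code > 0)

So si_fromuser() is true for a bare kill()-style send (SEND_SIG_NOINFO) or for a real siginfo marked as user-originated, while the kernel-internal cookies SEND_SIG_PRIV and SEND_SIG_FORCED never count as user sends.
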
@@ -591,22 +642,24 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
-       const struct cred *cred = current_cred(), *tcred;
+       const struct cred *cred, *tcred;
        struct pid *sid;
        int error;
 
        if (!valid_signal(sig))
                return -EINVAL;
 
-       if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+       if (!si_fromuser(info))
                return 0;
 
        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;
 
+       cred = current_cred();
        tcred = __task_cred(t);
-       if ((cred->euid ^ tcred->suid) &&
+       if (!same_thread_group(current, t) &&
+           (cred->euid ^ tcred->suid) &&
            (cred->euid ^ tcred->uid) &&
            (cred->uid  ^ tcred->suid) &&
            (cred->uid  ^ tcred->uid) &&
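
The XOR cascade above encodes the classic kill(2) permission rule, now short-circuited for same-thread-group sends: the sender's euid/uid must match the target's uid/suid. Since a ^ b is non-zero exactly when the two ids differ, permission is denied only if all four pairings differ. A plainer, equivalent formulation as a sketch (the CAP_KILL and SIGCONT-same-session escapes that the real check also applies are omitted):

    #include <sys/types.h>

    static int sig_ids_match(uid_t ceuid, uid_t cuid, uid_t tuid, uid_t tsuid)
    {
            return ceuid == tsuid || ceuid == tuid ||
                   cuid  == tsuid || cuid  == tuid;
    }
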
@@ -834,7 +887,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
        struct sigqueue *q;
        int override_rlimit;
 
-       trace_sched_signal_send(sig, t);
+       trace_signal_generate(sig, info, t);
 
        assert_spin_locked(&t->sighand->siglock);
 
@@ -869,7 +922,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
        else
                override_rlimit = 0;
 
-       q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+       q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
@@ -896,12 +949,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        break;
                }
        } else if (!is_si_special(info)) {
-               if (sig >= SIGRTMIN && info->si_code != SI_USER)
-               /*
-                * Queue overflow, abort.  We may abort if the signal was rt
-                * and sent by user using something other than kill().
-                */
+               if (sig >= SIGRTMIN && info->si_code != SI_USER) {
+                       /*
+                        * Queue overflow, abort.  We may abort if the
+                        * signal was rt and sent by user using something
+                        * other than kill().
+                        */
+                       trace_signal_overflow_fail(sig, group, info);
                        return -EAGAIN;
+               } else {
+                       /*
+                        * This is a silent loss of information.  We still
+                        * send the signal, but the *info bits are lost.
+                        */
+                       trace_signal_lose_info(sig, group, info);
+               }
        }
 
 out_set:
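
From user space, the overflow branch above surfaces as sigqueue(3) failing with EAGAIN once the per-user pending count exceeds RLIMIT_SIGPENDING; a plain kill(2) (si_code == SI_USER) still reaches out_set, merely losing its queued info. An illustrative demo (the signals are blocked so every entry stays queued):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            union sigval v = { .sival_int = 42 };
            sigset_t all;

            sigfillset(&all);
            sigprocmask(SIG_BLOCK, &all, NULL);     /* keep signals pending */

            for (;;) {
                    if (sigqueue(getpid(), SIGRTMIN, v) == -1) {
                            if (errno == EAGAIN)
                                    printf("RLIMIT_SIGPENDING reached\n");
                            break;
                    }
            }
            return 0;
    }
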
@@ -917,16 +979,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
        int from_ancestor_ns = 0;
 
 #ifdef CONFIG_PID_NS
-       if (!is_si_special(info) && SI_FROMUSER(info) &&
-                       task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
-               from_ancestor_ns = 1;
+       from_ancestor_ns = si_fromuser(info) &&
+                          !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
        return __send_signal(sig, info, t, group, from_ancestor_ns);
 }
 
-int print_fatal_signals;
-
 static void print_fatal_signal(struct pt_regs *regs, int signr)
 {
        printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -939,7 +998,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
                for (i = 0; i < 16; i++) {
                        unsigned char insn;
 
-                       __get_user(insn, (unsigned char *)(regs->ip + i));
+                       if (get_user(insn, (unsigned char *)(regs->ip + i)))
+                               break;
                        printk("%02x ", insn);
                }
        }
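
The switch from __get_user() to get_user() above is a real fix, not churn: get_user() verifies with access_ok() that the address lies in user space, and checking its return value stops the opcode dump at the first faulting byte instead of printing whatever happened to be left in the uninitialized variable.
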
@@ -1022,39 +1082,28 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        return ret;
 }
 
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-       force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
 /*
  * Nuke all other threads in the group.
  */
-void zap_other_threads(struct task_struct *p)
+int zap_other_threads(struct task_struct *p)
 {
-       struct task_struct *t;
+       struct task_struct *t = p;
+       int count = 0;
 
        p->signal->group_stop_count = 0;
 
-       for (t = next_thread(p); t != p; t = next_thread(t)) {
-               /*
-                * Don't bother with already dead threads
-                */
+       while_each_thread(p, t) {
+               count++;
+
+               /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
-
-               /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
-}
 
-int __fatal_signal_pending(struct task_struct *tsk)
-{
-       return sigismember(&tsk->pending.signal, SIGKILL);
+       return count;
 }
-EXPORT_SYMBOL(__fatal_signal_pending);
 
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
 {
@@ -1151,19 +1200,19 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
        int ret = -EINVAL;
        struct task_struct *p;
        const struct cred *pcred;
+       unsigned long flags;
 
        if (!valid_signal(sig))
                return ret;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        pcred = __task_cred(p);
-       if ((info == SEND_SIG_NOINFO ||
-            (!is_si_special(info) && SI_FROMUSER(info))) &&
+       if (si_fromuser(info) &&
            euid != pcred->suid && euid != pcred->uid &&
            uid  != pcred->suid && uid  != pcred->uid) {
                ret = -EPERM;
@@ -1172,14 +1221,16 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;
-       if (sig && p->sighand) {
-               unsigned long flags;
-               spin_lock_irqsave(&p->sighand->siglock, flags);
-               ret = __send_signal(sig, info, p, 1, 0);
-               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+       if (sig) {
+               if (lock_task_sighand(p, &flags)) {
+                       ret = __send_signal(sig, info, p, 1, 0);
+                       unlock_task_sighand(p, &flags);
+               } else
+                       ret = -ESRCH;
        }
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
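
The conversion from tasklist_lock to RCU in kill_pid_info_as_uid() leans on lock_task_sighand(), which already gives the needed stability: it dereferences tsk->sighand under RCU, re-checks it after taking siglock, and returns NULL for an exiting task, which the new code turns into -ESRCH. A lightly abridged sketch of its retry loop (the full version lives elsewhere in this same file):

    struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
                                             unsigned long *flags)
    {
            struct sighand_struct *sighand;

            for (;;) {
                    sighand = rcu_dereference(tsk->sighand);
                    if (unlikely(sighand == NULL))
                            break;                  /* task is exiting */
                    spin_lock_irqsave(&sighand->siglock, *flags);
                    if (likely(tsk->sighand == sighand))
                            break;                  /* lock is stable, done */
                    /* sighand changed under us; drop the lock and retry */
                    spin_unlock_irqrestore(&sighand->siglock, *flags);
            }
            return sighand;
    }
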
@@ -1299,19 +1350,19 @@ EXPORT_SYMBOL(kill_pid);
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
  * afford to lose notifications of asynchronous events, like timer
- * expirations or I/O completions".  In the case of Posix Timers 
+ * expirations or I/O completions".  In the case of Posix Timers
  * we allocate the sigqueue structure from the timer_create.  If this
  * allocation fails we are able to report the failure to the application
  * with an EAGAIN error.
  */
 struct sigqueue *sigqueue_alloc(void)
 {
-       struct sigqueue *q;
+       struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
 
-       if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
+       if (q)
                q->flags |= SIGQUEUE_PREALLOC;
-       return(q);
+
+       return q;
 }
 
 void sigqueue_free(struct sigqueue *q)
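
The comment block above describes a contract visible from user space: because the sigqueue is preallocated by timer_create(2), exhaustion is reported synchronously at creation time rather than as a silently lost expiration. Roughly:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct sigevent sev = {
                    .sigev_notify = SIGEV_SIGNAL,
                    .sigev_signo  = SIGRTMIN,
            };
            timer_t id;

            /* EAGAIN here corresponds to sigqueue_alloc() failing in-kernel.
             * Link with -lrt on older glibc. */
            if (timer_create(CLOCK_MONOTONIC, &sev, &id) == -1)
                    perror("timer_create");
            else
                    timer_delete(id);
            return 0;
    }
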
@@ -1813,11 +1864,6 @@ relock:
 
        for (;;) {
                struct k_sigaction *ka;
-
-               if (unlikely(signal->group_stop_count > 0) &&
-                   do_signal_stop(0))
-                       goto relock;
-
                /*
                 * Tracing can induce an artificial signal and choose sigaction.
                 * The return value in @signr determines the default action,
@@ -1829,6 +1875,10 @@ relock:
                if (unlikely(signr != 0))
                        ka = return_ka;
                else {
+                       if (unlikely(signal->group_stop_count > 0) &&
+                           do_signal_stop(0))
+                               goto relock;
+
                        signr = dequeue_signal(current, &current->blocked,
                                               info);
 
@@ -1845,6 +1895,9 @@ relock:
                        ka = &sighand->action[signr-1];
                }
 
+               /* Trace actually delivered signals. */
+               trace_signal_deliver(signr, info, ka);
+
                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
@@ -2685,3 +2738,43 @@ void __init signals_init(void)
 {
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
 }
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/kdb.h>
+/*
+ * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * signal internals.  This function checks if the required locks are
+ * available before calling the main signal code, to avoid kdb
+ * deadlocks.
+ */
+void
+kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+{
+       static struct task_struct *kdb_prev_t;
+       int sig, new_t;
+       if (!spin_trylock(&t->sighand->siglock)) {
+               kdb_printf("Can't do kill command now.\n"
+                          "The sigmask lock is held somewhere else in "
+                          "kernel, try again later\n");
+               return;
+       }
+       spin_unlock(&t->sighand->siglock);
+       new_t = kdb_prev_t != t;
+       kdb_prev_t = t;
+       if (t->state != TASK_RUNNING && new_t) {
+               kdb_printf("Process is not RUNNING, sending a signal from "
+                          "kdb risks deadlock\n"
+                          "on the run queue locks. "
+                          "The signal has _not_ been sent.\n"
+                          "Reissue the kill command if you want to risk "
+                          "the deadlock.\n");
+               return;
+       }
+       sig = info->si_signo;
+       if (send_sig_info(sig, info, t))
+               kdb_printf("Fail to deliver Signal %d to process %d.\n",
+                          sig, t->pid);
+       else
+               kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
+}
+#endif /* CONFIG_KGDB_KDB */