remove final fastcall users
diff --git a/kernel/signal.c b/kernel/signal.c
index e2a7d4b..84917fe 100644
@@ -12,7 +12,6 @@
 
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -22,6 +21,7 @@
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
 #include <linux/signal.h>
+#include <linux/signalfd.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 
 static struct kmem_cache *sigqueue_cachep;
 
-/*
- * In POSIX a signal is sent either to a specific thread (Linux task)
- * or to the process as a whole (Linux thread group).  How the signal
- * is sent determines whether it's to one thread or the whole group,
- * which determines which signal mask(s) are involved in blocking it
- * from being delivered until later.  When the signal is delivered,
- * either it's caught or ignored by a user handler or it has a default
- * effect that applies to the whole thread group (POSIX process).
- *
- * The possible effects an unblocked signal set to SIG_DFL can have are:
- *   ignore    - Nothing Happens
- *   terminate - kill the process, i.e. all threads in the group,
- *               similar to exit_group.  The group leader (only) reports
- *               WIFSIGNALED status to its parent.
- *   coredump  - write a core dump file describing all threads using
- *               the same mm and then kill all those threads
- *   stop      - stop all the threads in the group, i.e. TASK_STOPPED state
- *
- * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
- * Other signals when not blocked and set to SIG_DFL behaves as follows.
- * The job control signals also have other special effects.
- *
- *     +--------------------+------------------+
- *     |  POSIX signal      |  default action  |
- *     +--------------------+------------------+
- *     |  SIGHUP            |  terminate       |
- *     |  SIGINT            |  terminate       |
- *     |  SIGQUIT           |  coredump        |
- *     |  SIGILL            |  coredump        |
- *     |  SIGTRAP           |  coredump        |
- *     |  SIGABRT/SIGIOT    |  coredump        |
- *     |  SIGBUS            |  coredump        |
- *     |  SIGFPE            |  coredump        |
- *     |  SIGKILL           |  terminate(+)    |
- *     |  SIGUSR1           |  terminate       |
- *     |  SIGSEGV           |  coredump        |
- *     |  SIGUSR2           |  terminate       |
- *     |  SIGPIPE           |  terminate       |
- *     |  SIGALRM           |  terminate       |
- *     |  SIGTERM           |  terminate       |
- *     |  SIGCHLD           |  ignore          |
- *     |  SIGCONT           |  ignore(*)       |
- *     |  SIGSTOP           |  stop(*)(+)      |
- *     |  SIGTSTP           |  stop(*)         |
- *     |  SIGTTIN           |  stop(*)         |
- *     |  SIGTTOU           |  stop(*)         |
- *     |  SIGURG            |  ignore          |
- *     |  SIGXCPU           |  coredump        |
- *     |  SIGXFSZ           |  coredump        |
- *     |  SIGVTALRM         |  terminate       |
- *     |  SIGPROF           |  terminate       |
- *     |  SIGPOLL/SIGIO     |  terminate       |
- *     |  SIGSYS/SIGUNUSED  |  coredump        |
- *     |  SIGSTKFLT         |  terminate       |
- *     |  SIGWINCH          |  ignore          |
- *     |  SIGPWR            |  terminate       |
- *     |  SIGRTMIN-SIGRTMAX |  terminate       |
- *     +--------------------+------------------+
- *     |  non-POSIX signal  |  default action  |
- *     +--------------------+------------------+
- *     |  SIGEMT            |  coredump        |
- *     +--------------------+------------------+
- *
- * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
- * (*) Special job control effects:
- * When SIGCONT is sent, it resumes the process (all threads in the group)
- * from TASK_STOPPED state and also clears any pending/queued stop signals
- * (any of those marked with "stop(*)").  This happens regardless of blocking,
- * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
- * any pending/queued SIGCONT signals; this happens regardless of blocking,
- * catching, or ignored the stop signal, though (except for SIGSTOP) the
- * default action of stopping the process may happen later or never.
- */
-
-#ifdef SIGEMT
-#define M_SIGEMT       M(SIGEMT)
-#else
-#define M_SIGEMT       0
-#endif
-
-#if SIGRTMIN > BITS_PER_LONG
-#define M(sig) (1ULL << ((sig)-1))
-#else
-#define M(sig) (1UL << ((sig)-1))
-#endif
-#define T(sig, mask) (M(sig) & (mask))
-
-#define SIG_KERNEL_ONLY_MASK (\
-       M(SIGKILL)   |  M(SIGSTOP)                                   )
-
-#define SIG_KERNEL_STOP_MASK (\
-       M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
-
-#define SIG_KERNEL_COREDUMP_MASK (\
-        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
-        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
-        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
-
-#define SIG_KERNEL_IGNORE_MASK (\
-        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
-
-#define sig_kernel_only(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
-#define sig_kernel_coredump(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
-#define sig_kernel_ignore(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
-#define sig_kernel_stop(sig) \
-               (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
-
-#define sig_needs_tasklist(sig)        ((sig) == SIGCONT)
-
-#define sig_user_defined(t, signr) \
-       (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
-        ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
-
-#define sig_fatal(t, signr) \
-       (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
-        (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
@@ -174,7 +55,7 @@ static int sig_ignored(struct task_struct *t, int sig)
         * signal handler may change by the time it is
         * unblocked.
         */
-       if (sigismember(&t->blocked, sig))
+       if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
 
        /* Is it explicitly or implicitly ignored? */
@@ -215,26 +96,42 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-fastcall void recalc_sigpending_tsk(struct task_struct *t)
+static int recalc_sigpending_tsk(struct task_struct *t)
 {
        if (t->signal->group_stop_count > 0 ||
-           (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
-           PENDING(&t->signal->shared_pending, &t->blocked))
+           PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
-       else
-               clear_tsk_thread_flag(t, TIF_SIGPENDING);
+               return 1;
+       }
+       /*
+        * We must never clear the flag in another thread, or in current
+        * when it's possible the current syscall is returning -ERESTART*.
+        * So we don't clear it here; only callers who know it is safe
+        * to clear it do so.
+        */
+       return 0;
+}
+
+/*
+ * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
+ * This is superfluous when called on current; the wakeup is a harmless no-op.
+ */
+void recalc_sigpending_and_wake(struct task_struct *t)
+{
+       if (recalc_sigpending_tsk(t))
+               signal_wake_up(t, 0);
 }
 
 void recalc_sigpending(void)
 {
-       recalc_sigpending_tsk(current);
+       if (!recalc_sigpending_tsk(current) && !freezing(current))
+               clear_thread_flag(TIF_SIGPENDING);
 }
 
 /* Given the mask, find the first available signal that should be serviced. */
 
-static int
-next_signal(struct sigpending *pending, sigset_t *mask)
+int next_signal(struct sigpending *pending, sigset_t *mask)
 {
        unsigned long i, *s, *m, x;
        int sig = 0;
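
The rework above narrows the contract: recalc_sigpending_tsk() may set
TIF_SIGPENDING but never clears it, and only recalc_sigpending() on current
clears the flag. Both compute "pending and not blocked" per the PENDING()
macro. The same pending/blocked interaction is observable from userspace via
sigpending(2); a minimal illustrative program (userspace C, not kernel code):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;
	write(1, "delivered\n", 10);
}

int main(void)
{
	sigset_t block, pend;

	signal(SIGUSR1, handler);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* becomes pending, not delivered */
	sigpending(&pend);
	printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1));

	sigprocmask(SIG_UNBLOCK, &block, NULL);	/* handler runs here */
	return 0;
}
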
@@ -329,6 +226,16 @@ void flush_signals(struct task_struct *t)
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
+void ignore_signals(struct task_struct *t)
+{
+       int i;
+
+       for (i = 0; i < _NSIG; ++i)
+               t->sighand->action[i].sa.sa_handler = SIG_IGN;
+
+       flush_signals(t);
+}
+
 /*
  * Flush all handlers for a task.
  */
@@ -347,6 +254,16 @@ flush_signal_handlers(struct task_struct *t, int force_default)
        }
 }
 
+int unhandled_signal(struct task_struct *tsk, int sig)
+{
+       if (is_global_init(tsk))
+               return 1;
+       if (tsk->ptrace & PT_PTRACED)
+               return 0;
+       return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
+               (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+}
+
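
unhandled_signal() is exported so arch fault handlers can decide whether a
fault is worth logging: global init always qualifies, ptraced tasks never do,
and otherwise SIG_IGN/SIG_DFL counts as unhandled. A rough sketch of the
consumer, modeled on that era's x86 fault path (show_unhandled_signals and the
message text are assumptions, not part of this diff):

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit())
		printk(KERN_INFO "%s[%d]: unhandled segfault at %08lx\n",
		       tsk->comm, task_pid_nr(tsk), address);
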
 
 /* Notify the system that a driver wants to block all signals for this
  * process, and wants to be notified if any signals at all were to be
@@ -455,7 +372,12 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
-       int signr = __dequeue_signal(&tsk->pending, mask, info);
+       int signr = 0;
+
+       /* We only dequeue private signals from ourselves; we don't let
+        * signalfd steal them.
+        */
+       signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
@@ -483,7 +405,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                        }
                }
        }
-       recalc_sigpending_tsk(tsk);
+       recalc_sigpending();
        if (signr && unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
@@ -500,7 +422,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
-       if ( signr &&
+       if (signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
             info->si_sys_private){
                /*
@@ -534,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
        set_tsk_thread_flag(t, TIF_SIGPENDING);
 
        /*
-        * For SIGKILL, we want to wake it up in the stopped/traced case.
-        * We don't check t->state here because there is a race with it
+        * For SIGKILL, we want to wake it up in the stopped/traced/killable
+        * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
-               mask |= TASK_STOPPED | TASK_TRACED;
+               mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
 }
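
The new mask works because TASK_WAKEKILL is a modifier bit folded into the
composite states. Quoted from memory from include/linux/sched.h of the 2.6.25
era (illustrative; exact values may differ):

/*
 * The composite states embed TASK_WAKEKILL, so a single
 * wake_up_state() bit covers stopped, traced and killable sleeps.
 */
#define TASK_WAKEKILL		128
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
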
@@ -607,19 +529,21 @@ static int check_kill_permission(int sig, struct siginfo *info,
        int error = -EINVAL;
        if (!valid_signal(sig))
                return error;
-       error = -EPERM;
-       if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
-           && ((sig != SIGCONT) ||
-               (process_session(current) != process_session(t)))
-           && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-           && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-           && !capable(CAP_KILL))
+
+       if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
+               error = audit_signal_info(sig, t); /* Let audit system see the signal */
+               if (error)
+                       return error;
+               error = -EPERM;
+               if (((sig != SIGCONT) ||
+                       (task_session_nr(current) != task_session_nr(t)))
+                   && (current->euid ^ t->suid) && (current->euid ^ t->uid)
+                   && (current->uid ^ t->suid) && (current->uid ^ t->uid)
+                   && !capable(CAP_KILL))
                return error;
+       }
 
-       error = security_task_kill(t, info, sig, 0);
-       if (!error)
-               audit_signal_info(sig, t); /* Let audit system see the signal */
-       return error;
+       return security_task_kill(t, info, sig, 0);
 }
 
 /* forward decl */
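
Note that check_kill_permission() runs for signal 0 as well, which is never
delivered; that is what makes kill(pid, 0) a portable existence-and-permission
probe. A small userspace demonstration (illustrative):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* The null signal exercises the permission and existence checks only. */
static void probe(pid_t pid)
{
	if (kill(pid, 0) == 0)
		printf("%d: exists and we may signal it\n", (int)pid);
	else if (errno == EPERM)
		printf("%d: exists, permission denied\n", (int)pid);
	else
		printf("%d: no such process\n", (int)pid);
}

int main(void)
{
	probe(getpid());	/* always permitted */
	probe(1);		/* EPERM unless root / CAP_KILL */
	return 0;
}
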
@@ -696,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
-                       state = TASK_STOPPED;
+                       state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
@@ -740,6 +664,12 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
        int ret = 0;
 
        /*
+        * Deliver the signal to listening signalfds. This must be called
+        * with the sighand lock held.
+        */
+       signalfd_notify(t, sig);
+
+       /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
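
signalfd_notify() is the kernel half of the then-new signalfd(2) interface:
every signal queued under the sighand lock pokes any signalfd readers attached
to the task. For reference, the userspace half, as a minimal sketch using the
glibc wrapper:

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo fdsi;
	int sfd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	/* Block normal delivery so the signal stays queued for the fd. */
	sigprocmask(SIG_BLOCK, &mask, NULL);

	sfd = signalfd(-1, &mask, 0);
	if (sfd < 0)
		return 1;

	/* send_signal() -> signalfd_notify() wakes this read(). */
	if (read(sfd, &fdsi, sizeof(fdsi)) == sizeof(fdsi))
		printf("got signal %u from pid %u\n",
		       fdsi.ssi_signo, fdsi.ssi_pid);
	return 0;
}
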
@@ -764,7 +694,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
-                       q->info.si_pid = current->pid;
+                       q->info.si_pid = task_pid_vnr(current);
                        q->info.si_uid = current->uid;
                        break;
                case (unsigned long) SEND_SIG_PRIV:
@@ -795,6 +725,37 @@ out_set:
 #define LEGACY_QUEUE(sigptr, sig) \
        (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
 
+int print_fatal_signals;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+       printk("%s/%d: potentially unexpected fatal signal %d.\n",
+               current->comm, task_pid_nr(current), signr);
+
+#if defined(__i386__) && !defined(__arch_um__)
+       printk("code at %08lx: ", regs->ip);
+       {
+               int i;
+               for (i = 0; i < 16; i++) {
+                       unsigned char insn;
+
+                       __get_user(insn, (unsigned char *)(regs->ip + i));
+                       printk("%02x ", insn);
+               }
+       }
+#endif
+       printk("\n");
+       show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+       get_option(&str, &print_fatal_signals);
+
+       return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
 
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
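
Besides the print-fatal-signals=1 boot parameter registered above, the flag is
typically reachable at runtime through procfs; assuming this tree wires it up
as kernel.print-fatal-signals (an assumption, the sysctl is not part of this
hunk), it can be toggled like so:

#include <stdio.h>

int main(void)
{
	/* Path assumes the companion sysctl exists in this kernel. */
	FILE *f = fopen("/proc/sys/kernel/print-fatal-signals", "w");

	if (!f)
		return 1;	/* sysctl not present */
	fputs("1\n", f);
	fclose(f);
	return 0;
}
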
@@ -846,7 +807,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
-                       recalc_sigpending_tsk(t);
+                       recalc_sigpending_and_wake(t);
                }
        }
        ret = specific_send_sig_info(sig, info, t);
@@ -877,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
                return 0;
        if (sig == SIGKILL)
                return 1;
-       if (p->state & (TASK_STOPPED | TASK_TRACED))
+       if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
 }
@@ -947,33 +908,9 @@ __group_complete_signal(int sig, struct task_struct *p)
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
-                               t = next_thread(t);
-                       } while (t != p);
+                       } while_each_thread(p, t);
                        return;
                }
-
-               /*
-                * There will be a core dump.  We make all threads other
-                * than the chosen one go into a group stop so that nothing
-                * happens until it gets scheduled, takes the signal off
-                * the shared queue, and does the core dump.  This is a
-                * little more complicated than strictly necessary, but it
-                * keeps the signal state that winds up in the core dump
-                * unchanged from the death state, e.g. which thread had
-                * the core-dump signal unblocked.
-                */
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
-               p->signal->group_stop_count = 0;
-               p->signal->group_exit_task = t;
-               t = p;
-               do {
-                       p->signal->group_stop_count++;
-                       signal_wake_up(t, 0);
-                       t = next_thread(t);
-               } while (t != p);
-               wake_up_process(p->signal->group_exit_task);
-               return;
        }
 
        /*
@@ -1020,12 +957,8 @@ void zap_other_threads(struct task_struct *p)
 {
        struct task_struct *t;
 
-       p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;
 
-       if (thread_group_empty(p))
-               return;
-
        for (t = next_thread(p); t != p; t = next_thread(t)) {
                /*
                 * Don't bother with already dead threads
@@ -1033,23 +966,18 @@ void zap_other_threads(struct task_struct *p)
                if (t->exit_state)
                        continue;
 
-               /*
-                * We don't want to notify the parent, since we are
-                * killed as part of a thread group due to another
-                * thread doing an execve() or similar. So set the
-                * exit signal to -1 to allow immediate reaping of
-                * the process.  But don't detach the thread group
-                * leader.
-                */
-               if (t != p->group_leader)
-                       t->exit_signal = -1;
-
                /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
 }
 
+int __fatal_signal_pending(struct task_struct *tsk)
+{
+       return sigismember(&tsk->pending.signal, SIGKILL);
+}
+EXPORT_SYMBOL(__fatal_signal_pending);
+
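
__fatal_signal_pending() is exported as the backend of the
fatal_signal_pending() helper that TASK_KILLABLE sleeps poll. An illustrative
fragment of the intended caller pattern (a sketch, not code from this patch;
"condition" is a placeholder for the awaited event):

	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (condition)
			break;
		if (fatal_signal_pending(current))
			return -ERESTARTSYS;	/* only SIGKILL interrupts */
		schedule();
	}
	__set_current_state(TASK_RUNNING);
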
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
@@ -1090,7 +1018,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 }
 
 /*
- * kill_pgrp_info() sends a signal to a process group: this is what the tty
+ * __kill_pgrp_info() sends a signal to a process group: this is what the tty
  * control characters do (^C, ^Z etc)
  */
 
@@ -1109,30 +1037,28 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
        return success ? 0 : retval;
 }
 
-int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
-{
-       int retval;
-
-       read_lock(&tasklist_lock);
-       retval = __kill_pgrp_info(sig, info, pgrp);
-       read_unlock(&tasklist_lock);
-
-       return retval;
-}
-
 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 {
-       int error;
+       int error = -ESRCH;
        struct task_struct *p;
 
        rcu_read_lock();
        if (unlikely(sig_needs_tasklist(sig)))
                read_lock(&tasklist_lock);
 
+retry:
        p = pid_task(pid, PIDTYPE_PID);
-       error = -ESRCH;
-       if (p)
+       if (p) {
                error = group_send_sig_info(sig, info, p);
+               if (unlikely(error == -ESRCH))
+                       /*
+                        * The task was unhashed in between, try again.
+                        * If it is dead, pid_task() will return NULL,
+                        * if we race with de_thread() it will find the
+                        * new leader.
+                        */
+                       goto retry;
+       }
 
        if (unlikely(sig_needs_tasklist(sig)))
                read_unlock(&tasklist_lock);
@@ -1140,11 +1066,12 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
        return error;
 }
 
-static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+int
+kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
        int error;
        rcu_read_lock();
-       error = kill_pid_info(sig, info, find_pid(pid));
+       error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
 }
@@ -1196,30 +1123,34 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
 static int kill_something_info(int sig, struct siginfo *info, int pid)
 {
        int ret;
-       rcu_read_lock();
-       if (!pid) {
-               ret = kill_pgrp_info(sig, info, task_pgrp(current));
-       } else if (pid == -1) {
+
+       if (pid > 0) {
+               rcu_read_lock();
+               ret = kill_pid_info(sig, info, find_vpid(pid));
+               rcu_read_unlock();
+               return ret;
+       }
+
+       read_lock(&tasklist_lock);
+       if (pid != -1) {
+               ret = __kill_pgrp_info(sig, info,
+                               pid ? find_vpid(-pid) : task_pgrp(current));
+       } else {
                int retval = 0, count = 0;
                struct task_struct * p;
 
-               read_lock(&tasklist_lock);
                for_each_process(p) {
-                       if (p->pid > 1 && p->tgid != current->tgid) {
+                       if (p->pid > 1 && !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
-               read_unlock(&tasklist_lock);
                ret = count ? retval : -ESRCH;
-       } else if (pid < 0) {
-               ret = kill_pgrp_info(sig, info, find_pid(-pid));
-       } else {
-               ret = kill_pid_info(sig, info, find_pid(pid));
        }
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
+
        return ret;
 }
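
The restructured kill_something_info() makes the four kill(2) pid conventions
explicit: pid > 0 targets one process, pid == 0 the caller's process group,
pid < -1 the group -pid, and pid == -1 everything the caller may signal. From
userspace, using the null signal so the demo is harmless to run:

#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t pgrp = getpgrp();

	kill(getpid(), 0);	/* pid > 0: one process (kill_pid_info) */
	kill(0, 0);		/* pid == 0: caller's process group */
	kill(-pgrp, 0);		/* pid < -1: process group -pid */
	kill(-1, 0);		/* pid == -1: all we may signal */
	return 0;
}
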
 
@@ -1267,20 +1198,6 @@ send_sig(int sig, struct task_struct *p, int priv)
        return send_sig_info(sig, __si_special(priv), p);
 }
 
-/*
- * This is the entry point for "process-wide" signals.
- * They will go to an appropriate thread in the thread group.
- */
-int
-send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-       int ret;
-       read_lock(&tasklist_lock);
-       ret = group_send_sig_info(sig, info, p);
-       read_unlock(&tasklist_lock);
-       return ret;
-}
-
 void
 force_sig(int sig, struct task_struct *p)
 {
@@ -1308,7 +1225,13 @@ force_sigsegv(int sig, struct task_struct *p)
 
 int kill_pgrp(struct pid *pid, int sig, int priv)
 {
-       return kill_pgrp_info(sig, __si_special(priv), pid);
+       int ret;
+
+       read_lock(&tasklist_lock);
+       ret = __kill_pgrp_info(sig, __si_special(priv), pid);
+       read_unlock(&tasklist_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL(kill_pgrp);
 
@@ -1321,7 +1244,12 @@ EXPORT_SYMBOL(kill_pid);
 int
 kill_proc(pid_t pid, int sig, int priv)
 {
-       return kill_proc_info(sig, __si_special(priv), pid);
+       int ret;
+
+       rcu_read_lock();
+       ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
+       rcu_read_unlock();
+       return ret;
 }
 
 /*
@@ -1346,20 +1274,19 @@ struct sigqueue *sigqueue_alloc(void)
 void sigqueue_free(struct sigqueue *q)
 {
        unsigned long flags;
+       spinlock_t *lock = &current->sighand->siglock;
+
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * If the signal is still pending remove it from the
-        * pending queue.
+        * pending queue. We must hold ->siglock while testing
+        * q->list to serialize with collect_signal().
         */
-       if (unlikely(!list_empty(&q->list))) {
-               spinlock_t *lock = &current->sighand->siglock;
-               read_lock(&tasklist_lock);
-               spin_lock_irqsave(lock, flags);
-               if (!list_empty(&q->list))
-                       list_del_init(&q->list);
-               spin_unlock_irqrestore(lock, flags);
-               read_unlock(&tasklist_lock);
-       }
+       spin_lock_irqsave(lock, flags);
+       if (!list_empty(&q->list))
+               list_del_init(&q->list);
+       spin_unlock_irqrestore(lock, flags);
+
        q->flags &= ~SIGQUEUE_PREALLOC;
        __sigqueue_free(q);
 }
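
The struct sigqueue being freed here is the carrier for queued siginfo
payloads. The userspace-visible effect is what sigqueue(3) and SA_SIGINFO
expose; a minimal round trip (illustrative):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uctx)
{
	(void)uctx;
	/* si_value rode through the kernel's sigqueue machinery. */
	printf("sig %d, value %d\n", sig, si->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	union sigval v = { .sival_int = 42 };

	sigaction(SIGUSR1, &sa, NULL);
	/* Pending on self; the handler runs before sigqueue() returns. */
	sigqueue(getpid(), SIGUSR1, v);
	return 0;
}
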
@@ -1400,6 +1327,11 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                ret = 1;
                goto out;
        }
+       /*
+        * Deliver the signal to listening signalfds. This must be called
+        * with the sighand lock held.
+        */
+       signalfd_notify(p, sig);
 
        list_add_tail(&q->list, &p->pending.list);
        sigaddset(&p->pending.signal, sig);
@@ -1443,6 +1375,11 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                q->info.si_overrun++;
                goto out;
        } 
+       /*
+        * Deliver the signal to listening signalfds. This must be called
+        * with the sighand lock held.
+        */
+       signalfd_notify(p, sig);
 
        /*
         * Put this signal on the shared-pending queue.
@@ -1482,14 +1419,29 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        BUG_ON(sig == -1);
 
        /* do_notify_parent_cldstop should have been called instead.  */
-       BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+       BUG_ON(task_is_stopped_or_traced(tsk));
 
        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
 
        info.si_signo = sig;
        info.si_errno = 0;
-       info.si_pid = tsk->pid;
+       /*
+        * We are under tasklist_lock here, so our parent is tied to
+        * us and cannot exit and release its namespace.
+        *
+        * The only thing it can do is switch its nsproxy with sys_unshare,
+        * but unsharing pid namespaces is not allowed, so we will always
+        * see the relevant namespace.
+        *
+        * write_lock() currently calls preempt_disable() which is the
+        * same as rcu_read_lock(), but according to Oleg it is not
+        * correct to rely on this.
+        */
+       rcu_read_lock();
+       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+       rcu_read_unlock();
+
        info.si_uid = tsk->uid;
 
        /* FIXME: find out whether or not this is supposed to be c*time. */
@@ -1554,7 +1506,13 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 
        info.si_signo = SIGCHLD;
        info.si_errno = 0;
-       info.si_pid = tsk->pid;
+       /*
+        * see the comment in do_notify_parent() about the following 3 lines
+        */
+       rcu_read_lock();
+       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+       rcu_read_unlock();
+
        info.si_uid = tsk->uid;
 
        /* FIXME: find out whether or not this is supposed to be c*time. */
@@ -1592,15 +1550,6 @@ static inline int may_ptrace_stop(void)
 {
        if (!likely(current->ptrace & PT_PTRACED))
                return 0;
-
-       if (unlikely(current->parent == current->real_parent &&
-                   (current->ptrace & PT_ATTACHED)))
-               return 0;
-
-       if (unlikely(current->signal == current->parent->signal) &&
-           unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
-               return 0;
-
        /*
         * Are we in the middle of do_coredump?
         * If so and our tracer is also part of the coredump stopping
@@ -1618,6 +1567,17 @@ static inline int may_ptrace_stop(void)
 }
 
 /*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+       return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+                sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+               !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
  * This must be called with current->sighand->siglock held.
  *
  * This should be the path for all ptrace stops.
@@ -1625,11 +1585,31 @@ static inline int may_ptrace_stop(void)
  * That makes it a way to test a stopped process for
  * being ptrace-stopped vs being job-control-stopped.
  *
- * If we actually decide not to stop at all because the tracer is gone,
- * we leave nostop_code in current->exit_code.
+ * If we actually decide not to stop at all because the tracer
+ * is gone, we keep current->exit_code unless clear_code.
  */
-static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
+       int killed = 0;
+
+       if (arch_ptrace_stop_needed(exit_code, info)) {
+               /*
+                * The arch code has something special to do before a
+                * ptrace stop.  This is allowed to block, e.g. for faults
+                * on user stack pages.  We can't keep the siglock while
+                * calling arch_ptrace_stop, so we must release it now.
+                * To preserve proper semantics, we must do this before
+                * any signal bookkeeping like checking group_stop_count.
+                * Meanwhile, a SIGKILL could come in before we retake the
+                * siglock.  That must prevent us from sleeping in TASK_TRACED.
+                * So after regaining the lock, we must check for SIGKILL.
+                */
+               spin_unlock_irq(&current->sighand->siglock);
+               arch_ptrace_stop(exit_code, info);
+               spin_lock_irq(&current->sighand->siglock);
+               killed = sigkill_pending(current);
+       }
+
        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
@@ -1641,22 +1621,23 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        current->exit_code = exit_code;
 
        /* Let the debugger run.  */
-       set_current_state(TASK_TRACED);
+       __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        try_to_freeze();
        read_lock(&tasklist_lock);
-       if (may_ptrace_stop()) {
+       if (!unlikely(killed) && may_ptrace_stop()) {
                do_notify_parent_cldstop(current, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
-                * Don't stop here.
+                * Don't drop the lock yet, another tracer may come.
                 */
+               __set_current_state(TASK_RUNNING);
+               if (clear_code)
+                       current->exit_code = 0;
                read_unlock(&tasklist_lock);
-               set_current_state(TASK_RUNNING);
-               current->exit_code = nostop_code;
        }
 
        /*
@@ -1670,8 +1651,9 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
+        * This sets TIF_SIGPENDING, but never clears it.
         */
-       recalc_sigpending();
+       recalc_sigpending_tsk(current);
 }
 
 void ptrace_notify(int exit_code)
@@ -1683,12 +1665,12 @@ void ptrace_notify(int exit_code)
        memset(&info, 0, sizeof info);
        info.si_signo = SIGTRAP;
        info.si_code = exit_code;
-       info.si_pid = current->pid;
+       info.si_pid = task_pid_vnr(current);
        info.si_uid = current->uid;
 
        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, 0, &info);
+       ptrace_stop(exit_code, 1, &info);
        spin_unlock_irq(&current->sighand->siglock);
 }
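
ptrace_stop()/ptrace_notify() are the tracee side of the handshake; the
"debugger cancels the sig" case corresponds to the tracer resuming with signal
0, and the clear_code change above keeps exit_code sane when the tracer
vanishes. The classic userspace counterpart (illustrative):

#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* tracee enters ptrace_stop() */
		_exit(0);		/* reached only if tracer cancels it */
	}

	int status;
	waitpid(child, &status, 0);	/* observe the TASK_TRACED stop */
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	/* Resuming with sig 0 "cancels" the signal. */
	ptrace(PTRACE_CONT, child, NULL, (void *)0);
	waitpid(child, &status, 0);
	return 0;
}
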
 
@@ -1726,9 +1708,6 @@ static int do_signal_stop(int signr)
        struct signal_struct *sig = current->signal;
        int stop_count;
 
-       if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
-               return 0;
-
        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
@@ -1736,12 +1715,15 @@ static int do_signal_stop(int signr)
                 */
                stop_count = --sig->group_stop_count;
        } else {
+               struct task_struct *t;
+
+               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+                   unlikely(sig->group_exit_task))
+                       return 0;
                /*
                 * There is no group stop already in progress.
                 * We must initiate one now.
                 */
-               struct task_struct *t;
-
                sig->group_exit_code = signr;
 
                stop_count = 0;
@@ -1751,8 +1733,8 @@ static int do_signal_stop(int signr)
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
-                       if (!t->exit_state &&
-                           !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+                       if (!(t->flags & PF_EXITING) &&
+                           !task_is_stopped_or_traced(t)) {
                                stop_count++;
                                signal_wake_up(t, 0);
                        }
@@ -1769,47 +1751,6 @@ static int do_signal_stop(int signr)
        return 1;
 }
 
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static int handle_group_stop(void)
-{
-       int stop_count;
-
-       if (current->signal->group_exit_task == current) {
-               /*
-                * Group stop is so we can do a core dump,
-                * We are the initiating thread, so get on with it.
-                */
-               current->signal->group_exit_task = NULL;
-               return 0;
-       }
-
-       if (current->signal->flags & SIGNAL_GROUP_EXIT)
-               /*
-                * Group stop is so another thread can do a core dump,
-                * or else we are racing against a death signal.
-                * Just punt the stop so we can get the next signal.
-                */
-               return 0;
-
-       /*
-        * There is a group stop in progress.  We stop
-        * without any associated signal being in our queue.
-        */
-       stop_count = --current->signal->group_stop_count;
-       if (stop_count == 0)
-               current->signal->flags = SIGNAL_STOP_STOPPED;
-       current->exit_code = current->signal->group_exit_code;
-       set_current_state(TASK_STOPPED);
-       spin_unlock_irq(&current->sighand->siglock);
-       finish_stop(stop_count);
-       return 1;
-}
-
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
 {
@@ -1824,7 +1765,7 @@ relock:
                struct k_sigaction *ka;
 
                if (unlikely(current->signal->group_stop_count > 0) &&
-                   handle_group_stop())
+                   do_signal_stop(0))
                        goto relock;
 
                signr = dequeue_signal(current, mask, info);
@@ -1836,7 +1777,7 @@ relock:
                        ptrace_signal_deliver(regs, cookie);
 
                        /* Let the debugger run.  */
-                       ptrace_stop(signr, signr, info);
+                       ptrace_stop(signr, 0, info);
 
                        /* We're back.  Did the debugger cancel the sig?  */
                        signr = current->exit_code;
@@ -1853,7 +1794,7 @@ relock:
                                info->si_signo = signr;
                                info->si_errno = 0;
                                info->si_code = SI_USER;
-                               info->si_pid = current->parent->pid;
+                               info->si_pid = task_pid_vnr(current->parent);
                                info->si_uid = current->parent->uid;
                        }
 
@@ -1884,11 +1825,9 @@ relock:
                        continue;
 
                /*
-                * Init of a pid space gets no signals it doesn't want from
-                * within that pid space. It can of course get signals from
-                * its parent pid space.
+                * Global init gets no signals it doesn't want.
                 */
-               if (current == child_reaper(current))
+               if (is_global_init(current))
                        continue;
 
                if (sig_kernel_stop(signr)) {
@@ -1931,6 +1870,8 @@ relock:
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;
+               if ((signr != SIGKILL) && print_fatal_signals)
+                       print_fatal_signal(regs, signr);
                if (sig_kernel_coredump(signr)) {
                        /*
                         * If it was able to dump core, this kills all
@@ -1953,6 +1894,48 @@ relock:
        return signr;
 }
 
+void exit_signals(struct task_struct *tsk)
+{
+       int group_stop = 0;
+       struct task_struct *t;
+
+       if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
+               tsk->flags |= PF_EXITING;
+               return;
+       }
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       /*
+        * From now this task is not visible for group-wide signals,
+        * see wants_signal(), do_signal_stop().
+        */
+       tsk->flags |= PF_EXITING;
+       if (!signal_pending(tsk))
+               goto out;
+
+       /* It could be that __group_complete_signal() chose us to
+        * notify about a group-wide signal. Another thread should be
+        * woken now to take the signal, since we will not.
+        */
+        */
+       for (t = tsk; (t = next_thread(t)) != tsk; )
+               if (!signal_pending(t) && !(t->flags & PF_EXITING))
+                       recalc_sigpending_and_wake(t);
+
+       if (unlikely(tsk->signal->group_stop_count) &&
+                       !--tsk->signal->group_stop_count) {
+               tsk->signal->flags = SIGNAL_STOP_STOPPED;
+               group_stop = 1;
+       }
+out:
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       if (unlikely(group_stop)) {
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(tsk, CLD_STOPPED);
+               read_unlock(&tasklist_lock);
+       }
+}
+
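
exit_signals() is intended to run early in do_exit(), so PF_EXITING is set
under ->siglock before wants_signal()/do_signal_stop() can pick the dying
thread. A hedged sketch of the ordering at the call site (the wrapper name is
hypothetical; only the ordering matters):

static void sketch_exit_path(struct task_struct *tsk)
{
	exit_signals(tsk);	/* sets PF_EXITING */
	/* ... exit_mm(), exit_files(), etc. follow here ... */
}
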
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
@@ -2103,6 +2086,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
+        * Please remember to update the signalfd_copyinfo() function
+        * inside fs/signalfd.c too, in case siginfo_t changes.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
@@ -2238,7 +2223,7 @@ sys_kill(int pid, int sig)
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
-       info.si_pid = current->tgid;
+       info.si_pid = task_tgid_vnr(current);
        info.si_uid = current->uid;
 
        return kill_something_info(sig, &info, pid);
@@ -2254,12 +2239,12 @@ static int do_tkill(int tgid, int pid, int sig)
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
-       info.si_pid = current->tgid;
+       info.si_pid = task_tgid_vnr(current);
        info.si_uid = current->uid;
 
        read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       if (p && (tgid <= 0 || p->tgid == tgid)) {
+       p = find_task_by_vpid(pid);
+       if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
@@ -2338,15 +2323,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
        k = &current->sighand->action[sig-1];
 
        spin_lock_irq(&current->sighand->siglock);
-       if (signal_pending(current)) {
-               /*
-                * If there might be a fatal signal pending on multiple
-                * threads, make sure we take it before changing the action.
-                */
-               spin_unlock_irq(&current->sighand->siglock);
-               return -ERESTARTNOINTR;
-       }
-
        if (oact)
                *oact = *k;
 
@@ -2373,7 +2349,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
                                rm_from_queue_full(&mask, &t->pending);
-                               recalc_sigpending_tsk(t);
                                t = next_thread(t);
                        } while (t != current);
                }
@@ -2635,9 +2610,5 @@ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
 
 void __init signals_init(void)
 {
-       sigqueue_cachep =
-               kmem_cache_create("sigqueue",
-                                 sizeof(struct sigqueue),
-                                 __alignof__(struct sigqueue),
-                                 SLAB_PANIC, NULL, NULL);
+       sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
 }
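
KMEM_CACHE() is shorthand for the open-coded kmem_cache_create() call it
replaces, deriving the cache name, size and alignment from the struct. From
include/linux/slab.h of roughly this era (quoted from memory; the trailing
ctor/dtor parameters changed between 2.6.22 and 2.6.24):

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)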