[PATCH] introduce lock_task_sighand() helper
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <linux/capability.h>
29 #include <asm/param.h>
30 #include <asm/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/siginfo.h>
33
34 /*
35  * SLAB caches for signal bits.
36  */
37
38 static kmem_cache_t *sigqueue_cachep;
39
40 /*
41  * In POSIX a signal is sent either to a specific thread (Linux task)
42  * or to the process as a whole (Linux thread group).  How the signal
43  * is sent determines whether it's to one thread or the whole group,
44  * which determines which signal mask(s) are involved in blocking it
45  * from being delivered until later.  When the signal is delivered,
46  * either it's caught or ignored by a user handler or it has a default
47  * effect that applies to the whole thread group (POSIX process).
48  *
49  * The possible effects an unblocked signal set to SIG_DFL can have are:
50  *   ignore     - Nothing Happens
51  *   terminate  - kill the process, i.e. all threads in the group,
52  *                similar to exit_group.  The group leader (only) reports
53  *                WIFSIGNALED status to its parent.
54  *   coredump   - write a core dump file describing all threads using
55  *                the same mm and then kill all those threads
56  *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
57  *
58  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
59  * Other signals, when not blocked and set to SIG_DFL, behave as follows.
60  * The job control signals also have other special effects.
61  *
62  *      +--------------------+------------------+
63  *      |  POSIX signal      |  default action  |
64  *      +--------------------+------------------+
65  *      |  SIGHUP            |  terminate       |
66  *      |  SIGINT            |  terminate       |
67  *      |  SIGQUIT           |  coredump        |
68  *      |  SIGILL            |  coredump        |
69  *      |  SIGTRAP           |  coredump        |
70  *      |  SIGABRT/SIGIOT    |  coredump        |
71  *      |  SIGBUS            |  coredump        |
72  *      |  SIGFPE            |  coredump        |
73  *      |  SIGKILL           |  terminate(+)    |
74  *      |  SIGUSR1           |  terminate       |
75  *      |  SIGSEGV           |  coredump        |
76  *      |  SIGUSR2           |  terminate       |
77  *      |  SIGPIPE           |  terminate       |
78  *      |  SIGALRM           |  terminate       |
79  *      |  SIGTERM           |  terminate       |
80  *      |  SIGCHLD           |  ignore          |
81  *      |  SIGCONT           |  ignore(*)       |
82  *      |  SIGSTOP           |  stop(*)(+)      |
83  *      |  SIGTSTP           |  stop(*)         |
84  *      |  SIGTTIN           |  stop(*)         |
85  *      |  SIGTTOU           |  stop(*)         |
86  *      |  SIGURG            |  ignore          |
87  *      |  SIGXCPU           |  coredump        |
88  *      |  SIGXFSZ           |  coredump        |
89  *      |  SIGVTALRM         |  terminate       |
90  *      |  SIGPROF           |  terminate       |
91  *      |  SIGPOLL/SIGIO     |  terminate       |
92  *      |  SIGSYS/SIGUNUSED  |  coredump        |
93  *      |  SIGSTKFLT         |  terminate       |
94  *      |  SIGWINCH          |  ignore          |
95  *      |  SIGPWR            |  terminate       |
96  *      |  SIGRTMIN-SIGRTMAX |  terminate       |
97  *      +--------------------+------------------+
98  *      |  non-POSIX signal  |  default action  |
99  *      +--------------------+------------------+
100  *      |  SIGEMT            |  coredump        |
101  *      +--------------------+------------------+
102  *
103  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
104  * (*) Special job control effects:
105  * When SIGCONT is sent, it resumes the process (all threads in the group)
106  * from TASK_STOPPED state and also clears any pending/queued stop signals
107  * (any of those marked with "stop(*)").  This happens regardless of blocking,
108  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
109  * any pending/queued SIGCONT signals; this happens regardless of blocking,
110  * catching, or ignoring the stop signal, though (except for SIGSTOP) the
111  * default action of stopping the process may happen later or never.
112  */
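
/*
 * Editorial sketch (not part of the original file): the stop/continue
 * behaviour described above is visible from userspace with plain POSIX
 * calls.  A minimal parent/child example, assuming fork()/waitpid():
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int status;
 *		pid_t pid = fork();
 *
 *		if (pid == 0)
 *			for (;;)
 *				pause();		// child: wait for signals
 *
 *		kill(pid, SIGSTOP);			// stop(*)(+): cannot be caught
 *		waitpid(pid, &status, WUNTRACED);
 *		if (WIFSTOPPED(status))
 *			printf("stopped by %d\n", WSTOPSIG(status));
 *
 *		kill(pid, SIGCONT);			// resumes the group, clears pending stops
 *		waitpid(pid, &status, WCONTINUED);
 *		if (WIFCONTINUED(status))
 *			printf("continued\n");
 *
 *		kill(pid, SIGKILL);			// terminate(+): always fatal
 *		waitpid(pid, &status, 0);
 *		return 0;
 *	}
 */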
113
114 #ifdef SIGEMT
115 #define M_SIGEMT        M(SIGEMT)
116 #else
117 #define M_SIGEMT        0
118 #endif
119
120 #if SIGRTMIN > BITS_PER_LONG
121 #define M(sig) (1ULL << ((sig)-1))
122 #else
123 #define M(sig) (1UL << ((sig)-1))
124 #endif
125 #define T(sig, mask) (M(sig) & (mask))
126
127 #define SIG_KERNEL_ONLY_MASK (\
128         M(SIGKILL)   |  M(SIGSTOP)                                   )
129
130 #define SIG_KERNEL_STOP_MASK (\
131         M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
132
133 #define SIG_KERNEL_COREDUMP_MASK (\
134         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
135         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
136         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
137
138 #define SIG_KERNEL_IGNORE_MASK (\
139         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
140
141 #define sig_kernel_only(sig) \
142                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
143 #define sig_kernel_coredump(sig) \
144                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
145 #define sig_kernel_ignore(sig) \
146                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
147 #define sig_kernel_stop(sig) \
148                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
149
150 #define sig_user_defined(t, signr) \
151         (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
152          ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
153
154 #define sig_fatal(t, signr) \
155         (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
156          (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
157
158 static int sig_ignored(struct task_struct *t, int sig)
159 {
160         void __user * handler;
161
162         /*
163          * Tracers always want to know about signals..
164          */
165         if (t->ptrace & PT_PTRACED)
166                 return 0;
167
168         /*
169          * Blocked signals are never ignored, since the
170          * signal handler may change by the time it is
171          * unblocked.
172          */
173         if (sigismember(&t->blocked, sig))
174                 return 0;
175
176         /* Is it explicitly or implicitly ignored? */
177         handler = t->sighand->action[sig-1].sa.sa_handler;
178         return   handler == SIG_IGN ||
179                 (handler == SIG_DFL && sig_kernel_ignore(sig));
180 }
181
182 /*
183  * Re-calculate pending state from the set of locally pending
184  * signals, globally pending signals, and blocked signals.
185  */
186 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
187 {
188         unsigned long ready;
189         long i;
190
191         switch (_NSIG_WORDS) {
192         default:
193                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
194                         ready |= signal->sig[i] &~ blocked->sig[i];
195                 break;
196
197         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
198                 ready |= signal->sig[2] &~ blocked->sig[2];
199                 ready |= signal->sig[1] &~ blocked->sig[1];
200                 ready |= signal->sig[0] &~ blocked->sig[0];
201                 break;
202
203         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
204                 ready |= signal->sig[0] &~ blocked->sig[0];
205                 break;
206
207         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
208         }
209         return ready != 0;
210 }
211
212 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
213
214 fastcall void recalc_sigpending_tsk(struct task_struct *t)
215 {
216         if (t->signal->group_stop_count > 0 ||
217             (freezing(t)) ||
218             PENDING(&t->pending, &t->blocked) ||
219             PENDING(&t->signal->shared_pending, &t->blocked))
220                 set_tsk_thread_flag(t, TIF_SIGPENDING);
221         else
222                 clear_tsk_thread_flag(t, TIF_SIGPENDING);
223 }
224
225 void recalc_sigpending(void)
226 {
227         recalc_sigpending_tsk(current);
228 }
229
230 /* Given the mask, find the first available signal that should be serviced. */
231
232 static int
233 next_signal(struct sigpending *pending, sigset_t *mask)
234 {
235         unsigned long i, *s, *m, x;
236         int sig = 0;
237         
238         s = pending->signal.sig;
239         m = mask->sig;
240         switch (_NSIG_WORDS) {
241         default:
242                 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
243                         if ((x = *s &~ *m) != 0) {
244                                 sig = ffz(~x) + i*_NSIG_BPW + 1;
245                                 break;
246                         }
247                 break;
248
249         case 2: if ((x = s[0] &~ m[0]) != 0)
250                         sig = 1;
251                 else if ((x = s[1] &~ m[1]) != 0)
252                         sig = _NSIG_BPW + 1;
253                 else
254                         break;
255                 sig += ffz(~x);
256                 break;
257
258         case 1: if ((x = *s &~ *m) != 0)
259                         sig = ffz(~x) + 1;
260                 break;
261         }
262         
263         return sig;
264 }
265
266 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
267                                          int override_rlimit)
268 {
269         struct sigqueue *q = NULL;
270
271         atomic_inc(&t->user->sigpending);
272         if (override_rlimit ||
273             atomic_read(&t->user->sigpending) <=
274                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
275                 q = kmem_cache_alloc(sigqueue_cachep, flags);
276         if (unlikely(q == NULL)) {
277                 atomic_dec(&t->user->sigpending);
278         } else {
279                 INIT_LIST_HEAD(&q->list);
280                 q->flags = 0;
281                 q->user = get_uid(t->user);
282         }
283         return(q);
284 }
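
/*
 * Editorial sketch (not part of the original file): the per-user accounting
 * above is what makes sigqueue(3) fail with EAGAIN once RLIMIT_SIGPENDING
 * worth of signals is queued.  A rough userspace illustration, keeping the
 * signals blocked so they accumulate:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		union sigval val = { .sival_int = 0 };
 *		long n = 0;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGRTMIN);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// keep them pending
 *
 *		while (sigqueue(getpid(), SIGRTMIN, val) == 0)
 *			n++;
 *		if (errno == EAGAIN)
 *			printf("queued %ld signals before the limit\n", n);
 *		return 0;
 *	}
 */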
285
286 static void __sigqueue_free(struct sigqueue *q)
287 {
288         if (q->flags & SIGQUEUE_PREALLOC)
289                 return;
290         atomic_dec(&q->user->sigpending);
291         free_uid(q->user);
292         kmem_cache_free(sigqueue_cachep, q);
293 }
294
295 static void flush_sigqueue(struct sigpending *queue)
296 {
297         struct sigqueue *q;
298
299         sigemptyset(&queue->signal);
300         while (!list_empty(&queue->list)) {
301                 q = list_entry(queue->list.next, struct sigqueue , list);
302                 list_del_init(&q->list);
303                 __sigqueue_free(q);
304         }
305 }
306
307 /*
308  * Flush all pending signals for a task.
309  */
310
311 void
312 flush_signals(struct task_struct *t)
313 {
314         unsigned long flags;
315
316         spin_lock_irqsave(&t->sighand->siglock, flags);
317         clear_tsk_thread_flag(t,TIF_SIGPENDING);
318         flush_sigqueue(&t->pending);
319         flush_sigqueue(&t->signal->shared_pending);
320         spin_unlock_irqrestore(&t->sighand->siglock, flags);
321 }
322
323 /*
324  * This function expects the tasklist_lock write-locked.
325  */
326 void __exit_sighand(struct task_struct *tsk)
327 {
328         struct sighand_struct * sighand = tsk->sighand;
329
330         /* Ok, we're done with the signal handlers */
331         tsk->sighand = NULL;
332         if (atomic_dec_and_test(&sighand->count))
333                 kmem_cache_free(sighand_cachep, sighand);
334 }
335
336 void exit_sighand(struct task_struct *tsk)
337 {
338         write_lock_irq(&tasklist_lock);
339         rcu_read_lock();
340         if (tsk->sighand != NULL) {
341                 struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
342                 spin_lock(&sighand->siglock);
343                 __exit_sighand(tsk);
344                 spin_unlock(&sighand->siglock);
345         }
346         rcu_read_unlock();
347         write_unlock_irq(&tasklist_lock);
348 }
349
350 /*
351  * This function expects the tasklist_lock write-locked.
352  */
353 void __exit_signal(struct task_struct *tsk)
354 {
355         struct signal_struct * sig = tsk->signal;
356         struct sighand_struct * sighand;
357
358         if (!sig)
359                 BUG();
360         if (!atomic_read(&sig->count))
361                 BUG();
362         rcu_read_lock();
363         sighand = rcu_dereference(tsk->sighand);
364         spin_lock(&sighand->siglock);
365         posix_cpu_timers_exit(tsk);
366         if (atomic_dec_and_test(&sig->count)) {
367                 posix_cpu_timers_exit_group(tsk);
368                 tsk->signal = NULL;
369                 __exit_sighand(tsk);
370                 spin_unlock(&sighand->siglock);
371                 flush_sigqueue(&sig->shared_pending);
372         } else {
373                 /*
374                  * If there is any task waiting for the group exit
375                  * then notify it:
376                  */
377                 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
378                         wake_up_process(sig->group_exit_task);
379                         sig->group_exit_task = NULL;
380                 }
381                 if (tsk == sig->curr_target)
382                         sig->curr_target = next_thread(tsk);
383                 tsk->signal = NULL;
384                 /*
385                  * Accumulate here the counters for all threads but the
386                  * group leader as they die, so they can be added into
387                  * the process-wide totals when those are taken.
388                  * The group leader stays around as a zombie as long
389                  * as there are other threads.  When it gets reaped,
390                  * the exit.c code will add its counts into these totals.
391                  * We won't ever get here for the group leader, since it
392                  * will have been the last reference on the signal_struct.
393                  */
394                 sig->utime = cputime_add(sig->utime, tsk->utime);
395                 sig->stime = cputime_add(sig->stime, tsk->stime);
396                 sig->min_flt += tsk->min_flt;
397                 sig->maj_flt += tsk->maj_flt;
398                 sig->nvcsw += tsk->nvcsw;
399                 sig->nivcsw += tsk->nivcsw;
400                 sig->sched_time += tsk->sched_time;
401                 __exit_sighand(tsk);
402                 spin_unlock(&sighand->siglock);
403                 sig = NULL;     /* Marker for below.  */
404         }
405         rcu_read_unlock();
406         clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
407         flush_sigqueue(&tsk->pending);
408         if (sig) {
409                 /*
410                  * We are cleaning up the signal_struct here.
411                  */
412                 exit_thread_group_keys(sig);
413                 kmem_cache_free(signal_cachep, sig);
414         }
415 }
416
417 void exit_signal(struct task_struct *tsk)
418 {
419         atomic_dec(&tsk->signal->live);
420
421         write_lock_irq(&tasklist_lock);
422         __exit_signal(tsk);
423         write_unlock_irq(&tasklist_lock);
424 }
425
426 /*
427  * Flush all handlers for a task.
428  */
429
430 void
431 flush_signal_handlers(struct task_struct *t, int force_default)
432 {
433         int i;
434         struct k_sigaction *ka = &t->sighand->action[0];
435         for (i = _NSIG ; i != 0 ; i--) {
436                 if (force_default || ka->sa.sa_handler != SIG_IGN)
437                         ka->sa.sa_handler = SIG_DFL;
438                 ka->sa.sa_flags = 0;
439                 sigemptyset(&ka->sa.sa_mask);
440                 ka++;
441         }
442 }
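
/*
 * Editorial sketch (not part of the original file): with force_default == 0
 * the loop above matches the execve() rule that caught signals revert to
 * SIG_DFL while SIG_IGN dispositions are preserved, e.g. from userspace
 * (assuming /bin/true exists):
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_term(int sig) { (void)sig; }
 *
 *	int main(void)
 *	{
 *		signal(SIGINT, SIG_IGN);	// ignored: survives exec
 *		signal(SIGTERM, on_term);	// caught: reset to SIG_DFL by exec
 *		execl("/bin/true", "true", (char *)NULL);
 *		return 1;
 *	}
 */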
443
444
445 /* Notify the system that a driver wants to block all signals for this
446  * process, and wants to be notified if any signals at all were to be
447  * sent/acted upon.  If the notifier routine returns non-zero, then the
448  * signal will be acted upon after all.  If the notifier routine returns 0,
449  * then the signal will be blocked.  Only one block per process is
450  * allowed.  priv is a pointer to private data that the notifier routine
451  * can use to determine if the signal should be blocked or not.  */
452
453 void
454 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
455 {
456         unsigned long flags;
457
458         spin_lock_irqsave(&current->sighand->siglock, flags);
459         current->notifier_mask = mask;
460         current->notifier_data = priv;
461         current->notifier = notifier;
462         spin_unlock_irqrestore(&current->sighand->siglock, flags);
463 }
464
465 /* Notify the system that blocking has ended. */
466
467 void
468 unblock_all_signals(void)
469 {
470         unsigned long flags;
471
472         spin_lock_irqsave(&current->sighand->siglock, flags);
473         current->notifier = NULL;
474         current->notifier_data = NULL;
475         recalc_sigpending();
476         spin_unlock_irqrestore(&current->sighand->siglock, flags);
477 }
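
/*
 * Editorial sketch (not part of the original file): a driver that must not
 * let the task take job-control stops while it holds hardware state might
 * use the notifier pair above roughly as follows (all names below are made
 * up for illustration):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *
 *		return dev->hw_lock_held ? 0 : 1;	// 0 keeps the signal blocked
 *	}
 *
 *	sigset_t mask;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGSTOP);
 *	sigaddset(&mask, SIGTSTP);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... hardware critical section ...
 *	unblock_all_signals();
 */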
478
479 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
480 {
481         struct sigqueue *q, *first = NULL;
482         int still_pending = 0;
483
484         if (unlikely(!sigismember(&list->signal, sig)))
485                 return 0;
486
487         /*
488          * Collect the siginfo appropriate to this signal.  Check if
489          * there is another siginfo for the same signal.
490         */
491         list_for_each_entry(q, &list->list, list) {
492                 if (q->info.si_signo == sig) {
493                         if (first) {
494                                 still_pending = 1;
495                                 break;
496                         }
497                         first = q;
498                 }
499         }
500         if (first) {
501                 list_del_init(&first->list);
502                 copy_siginfo(info, &first->info);
503                 __sigqueue_free(first);
504                 if (!still_pending)
505                         sigdelset(&list->signal, sig);
506         } else {
507
508                 /* Ok, it wasn't in the queue.  This must be
509                    a fast-pathed signal or we must have been
510                    out of queue space.  So zero out the info.
511                  */
512                 sigdelset(&list->signal, sig);
513                 info->si_signo = sig;
514                 info->si_errno = 0;
515                 info->si_code = 0;
516                 info->si_pid = 0;
517                 info->si_uid = 0;
518         }
519         return 1;
520 }
521
522 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
523                         siginfo_t *info)
524 {
525         int sig = 0;
526
527         sig = next_signal(pending, mask);
528         if (sig) {
529                 if (current->notifier) {
530                         if (sigismember(current->notifier_mask, sig)) {
531                                 if (!(current->notifier)(current->notifier_data)) {
532                                         clear_thread_flag(TIF_SIGPENDING);
533                                         return 0;
534                                 }
535                         }
536                 }
537
538                 if (!collect_signal(sig, pending, info))
539                         sig = 0;
540                                 
541         }
542         recalc_sigpending();
543
544         return sig;
545 }
546
547 /*
548  * Dequeue a signal and return the element to the caller, which is 
549  * expected to free it.
550  *
551  * All callers have to hold the siglock.
552  */
553 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
554 {
555         int signr = __dequeue_signal(&tsk->pending, mask, info);
556         if (!signr)
557                 signr = __dequeue_signal(&tsk->signal->shared_pending,
558                                          mask, info);
559         if (signr && unlikely(sig_kernel_stop(signr))) {
560                 /*
561                  * Set a marker that we have dequeued a stop signal.  Our
562                  * caller might release the siglock and then the pending
563                  * stop signal it is about to process is no longer in the
564                  * pending bitmasks, but must still be cleared by a SIGCONT
565                  * (and overruled by a SIGKILL).  So those cases clear this
566                  * shared flag after we've set it.  Note that this flag may
567                  * remain set after the signal we return is ignored or
568                  * handled.  That doesn't matter because its only purpose
569                  * is to alert stop-signal processing code when another
570                  * processor has come along and cleared the flag.
571                  */
572                 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
573                         tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
574         }
575         if ( signr &&
576              ((info->si_code & __SI_MASK) == __SI_TIMER) &&
577              info->si_sys_private){
578                 /*
579                  * Release the siglock to ensure proper locking order
580                  * of timer locks outside of siglocks.  Note, we leave
581                  * irqs disabled here, since the posix-timers code is
582                  * about to disable them again anyway.
583                  */
584                 spin_unlock(&tsk->sighand->siglock);
585                 do_schedule_next_timer(info);
586                 spin_lock(&tsk->sighand->siglock);
587         }
588         return signr;
589 }
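
/*
 * Editorial sketch (not part of the original file): callers take the
 * siglock with irqs disabled around the dequeue, along the lines of the
 * signal delivery and sigwait paths:
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */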
590
591 /*
592  * Tell a process that it has a new active signal..
593  *
594  * NOTE! we rely on the previous spin_lock to
595  * lock interrupts for us! We can only be called with
596  * "siglock" held, and the local interrupt must
597  * have been disabled when that got acquired!
598  *
599  * No need to set need_resched since signal event passing
600  * goes through ->blocked
601  */
602 void signal_wake_up(struct task_struct *t, int resume)
603 {
604         unsigned int mask;
605
606         set_tsk_thread_flag(t, TIF_SIGPENDING);
607
608         /*
609          * For SIGKILL, we want to wake it up in the stopped/traced case.
610          * We don't check t->state here because there is a race with it
611  * executing on another processor and just now entering the stopped state.
612          * By using wake_up_state, we ensure the process will wake up and
613          * handle its death signal.
614          */
615         mask = TASK_INTERRUPTIBLE;
616         if (resume)
617                 mask |= TASK_STOPPED | TASK_TRACED;
618         if (!wake_up_state(t, mask))
619                 kick_process(t);
620 }
621
622 /*
623  * Remove signals in mask from the pending set and queue.
624  * Returns 1 if any signals were found.
625  *
626  * All callers must be holding the siglock.
627  *
628  * This version takes a sigset mask and looks at all signals,
629  * not just those in the first mask word.
630  */
631 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
632 {
633         struct sigqueue *q, *n;
634         sigset_t m;
635
636         sigandsets(&m, mask, &s->signal);
637         if (sigisemptyset(&m))
638                 return 0;
639
640         signandsets(&s->signal, &s->signal, mask);
641         list_for_each_entry_safe(q, n, &s->list, list) {
642                 if (sigismember(mask, q->info.si_signo)) {
643                         list_del_init(&q->list);
644                         __sigqueue_free(q);
645                 }
646         }
647         return 1;
648 }
649 /*
650  * Remove signals in mask from the pending set and queue.
651  * Returns 1 if any signals were found.
652  *
653  * All callers must be holding the siglock.
654  */
655 static int rm_from_queue(unsigned long mask, struct sigpending *s)
656 {
657         struct sigqueue *q, *n;
658
659         if (!sigtestsetmask(&s->signal, mask))
660                 return 0;
661
662         sigdelsetmask(&s->signal, mask);
663         list_for_each_entry_safe(q, n, &s->list, list) {
664                 if (q->info.si_signo < SIGRTMIN &&
665                     (mask & sigmask(q->info.si_signo))) {
666                         list_del_init(&q->list);
667                         __sigqueue_free(q);
668                 }
669         }
670         return 1;
671 }
672
673 /*
674  * Bad permissions for sending the signal
675  */
676 static int check_kill_permission(int sig, struct siginfo *info,
677                                  struct task_struct *t)
678 {
679         int error = -EINVAL;
680         if (!valid_signal(sig))
681                 return error;
682         error = -EPERM;
683         if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
684             && ((sig != SIGCONT) ||
685                 (current->signal->session != t->signal->session))
686             && (current->euid ^ t->suid) && (current->euid ^ t->uid)
687             && (current->uid ^ t->suid) && (current->uid ^ t->uid)
688             && !capable(CAP_KILL))
689                 return error;
690
691         error = security_task_kill(t, info, sig);
692         if (!error)
693                 audit_signal_info(sig, t); /* Let audit system see the signal */
694         return error;
695 }
696
697 /* forward decl */
698 static void do_notify_parent_cldstop(struct task_struct *tsk,
699                                      int to_self,
700                                      int why);
701
702 /*
703  * Handle magic process-wide effects of stop/continue signals.
704  * Unlike the signal actions, these happen immediately at signal-generation
705  * time regardless of blocking, ignoring, or handling.  This does the
706  * actual continuing for SIGCONT, but not the actual stopping for stop
707  * signals.  The process stop is done as a signal action for SIG_DFL.
708  */
709 static void handle_stop_signal(int sig, struct task_struct *p)
710 {
711         struct task_struct *t;
712
713         if (p->signal->flags & SIGNAL_GROUP_EXIT)
714                 /*
715                  * The process is in the middle of dying already.
716                  */
717                 return;
718
719         if (sig_kernel_stop(sig)) {
720                 /*
721                  * This is a stop signal.  Remove SIGCONT from all queues.
722                  */
723                 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
724                 t = p;
725                 do {
726                         rm_from_queue(sigmask(SIGCONT), &t->pending);
727                         t = next_thread(t);
728                 } while (t != p);
729         } else if (sig == SIGCONT) {
730                 /*
731                  * Remove all stop signals from all queues,
732                  * and wake all threads.
733                  */
734                 if (unlikely(p->signal->group_stop_count > 0)) {
735                         /*
736                          * There was a group stop in progress.  We'll
737                          * pretend it finished before we got here.  We are
738                          * obliged to report it to the parent: if the
739                          * SIGSTOP happened "after" this SIGCONT, then it
740                          * would have cleared this pending SIGCONT.  If it
741                          * happened "before" this SIGCONT, then the parent
742                          * got the SIGCHLD about the stop finishing before
743                          * the continue happened.  We do the notification
744                          * now, and it's as if the stop had finished and
745                          * the SIGCHLD was pending on entry to this kill.
746                          */
747                         p->signal->group_stop_count = 0;
748                         p->signal->flags = SIGNAL_STOP_CONTINUED;
749                         spin_unlock(&p->sighand->siglock);
750                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
751                         spin_lock(&p->sighand->siglock);
752                 }
753                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
754                 t = p;
755                 do {
756                         unsigned int state;
757                         rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
758                         
759                         /*
760                          * If there is a handler for SIGCONT, we must make
761                          * sure that no thread returns to user mode before
762                          * we post the signal, in case it was the only
763                          * thread eligible to run the signal handler--then
764                          * it must not do anything between resuming and
765                          * running the handler.  With the TIF_SIGPENDING
766                          * flag set, the thread will pause and acquire the
767                          * siglock that we hold now and until we've queued
768                          * the pending signal. 
769                          *
770                          * Wake up the stopped thread _after_ setting
771                          * TIF_SIGPENDING
772                          */
773                         state = TASK_STOPPED;
774                         if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
775                                 set_tsk_thread_flag(t, TIF_SIGPENDING);
776                                 state |= TASK_INTERRUPTIBLE;
777                         }
778                         wake_up_state(t, state);
779
780                         t = next_thread(t);
781                 } while (t != p);
782
783                 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
784                         /*
785                          * We were in fact stopped, and are now continued.
786                          * Notify the parent with CLD_CONTINUED.
787                          */
788                         p->signal->flags = SIGNAL_STOP_CONTINUED;
789                         p->signal->group_exit_code = 0;
790                         spin_unlock(&p->sighand->siglock);
791                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
792                         spin_lock(&p->sighand->siglock);
793                 } else {
794                         /*
795                          * We are not stopped, but there could be a stop
796                          * signal in the middle of being processed after
797                          * being removed from the queue.  Clear that too.
798                          */
799                         p->signal->flags = 0;
800                 }
801         } else if (sig == SIGKILL) {
802                 /*
803                  * Make sure that any pending stop signal already dequeued
804                  * is undone by the wakeup for SIGKILL.
805                  */
806                 p->signal->flags = 0;
807         }
808 }
809
810 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
811                         struct sigpending *signals)
812 {
813         struct sigqueue * q = NULL;
814         int ret = 0;
815
816         /*
817          * fast-pathed signals for kernel-internal things like SIGSTOP
818          * or SIGKILL.
819          */
820         if (info == SEND_SIG_FORCED)
821                 goto out_set;
822
823         /* Real-time signals must be queued if sent by sigqueue, or
824            some other real-time mechanism.  It is implementation
825            defined whether kill() does so.  We attempt to do so, on
826            the principle of least surprise, but since kill is not
827            allowed to fail with EAGAIN when low on memory we just
828            make sure at least one signal gets delivered and don't
829            pass on the info struct.  */
830
831         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
832                                              (is_si_special(info) ||
833                                               info->si_code >= 0)));
834         if (q) {
835                 list_add_tail(&q->list, &signals->list);
836                 switch ((unsigned long) info) {
837                 case (unsigned long) SEND_SIG_NOINFO:
838                         q->info.si_signo = sig;
839                         q->info.si_errno = 0;
840                         q->info.si_code = SI_USER;
841                         q->info.si_pid = current->pid;
842                         q->info.si_uid = current->uid;
843                         break;
844                 case (unsigned long) SEND_SIG_PRIV:
845                         q->info.si_signo = sig;
846                         q->info.si_errno = 0;
847                         q->info.si_code = SI_KERNEL;
848                         q->info.si_pid = 0;
849                         q->info.si_uid = 0;
850                         break;
851                 default:
852                         copy_siginfo(&q->info, info);
853                         break;
854                 }
855         } else if (!is_si_special(info)) {
856                 if (sig >= SIGRTMIN && info->si_code != SI_USER)
857                 /*
858                  * Queue overflow, abort.  We may abort if the signal was rt
859                  * and sent by user using something other than kill().
860                  */
861                         return -EAGAIN;
862         }
863
864 out_set:
865         sigaddset(&signals->signal, sig);
866         return ret;
867 }
868
869 #define LEGACY_QUEUE(sigptr, sig) \
870         (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
871
872
873 static int
874 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
875 {
876         int ret = 0;
877
878         if (!irqs_disabled())
879                 BUG();
880         assert_spin_locked(&t->sighand->siglock);
881
882         /* Short-circuit ignored signals.  */
883         if (sig_ignored(t, sig))
884                 goto out;
885
886         /* Support queueing exactly one non-rt signal, so that we
887            can get more detailed information about the cause of
888            the signal. */
889         if (LEGACY_QUEUE(&t->pending, sig))
890                 goto out;
891
892         ret = send_signal(sig, info, t, &t->pending);
893         if (!ret && !sigismember(&t->blocked, sig))
894                 signal_wake_up(t, sig == SIGKILL);
895 out:
896         return ret;
897 }
898
899 /*
900  * Force a signal that the process can't ignore: if necessary
901  * we unblock the signal and change any SIG_IGN to SIG_DFL.
902  */
903
904 int
905 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
906 {
907         unsigned long int flags;
908         int ret;
909
910         spin_lock_irqsave(&t->sighand->siglock, flags);
911         if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
912                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
913         }
914         if (sigismember(&t->blocked, sig)) {
915                 sigdelset(&t->blocked, sig);
916         }
917         recalc_sigpending_tsk(t);
918         ret = specific_send_sig_info(sig, info, t);
919         spin_unlock_irqrestore(&t->sighand->siglock, flags);
920
921         return ret;
922 }
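
/*
 * Editorial sketch (not part of the original file): architecture fault
 * handlers are typical callers, filling in a siginfo and forcing it on the
 * faulting task ("fault_address" below stands in for the arch-specific
 * faulting address):
 *
 *	siginfo_t info;
 *
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)fault_address;
 *	force_sig_info(SIGSEGV, &info, current);
 */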
923
924 void
925 force_sig_specific(int sig, struct task_struct *t)
926 {
927         force_sig_info(sig, SEND_SIG_FORCED, t);
928 }
929
930 /*
931  * Test if P wants to take SIG.  After we've checked all threads with this,
932  * it's equivalent to finding no threads not blocking SIG.  Any threads not
933  * blocking SIG were ruled out because they are not running and already
934  * have pending signals.  Such threads will dequeue from the shared queue
935  * as soon as they're available, so putting the signal on the shared queue
936  * will be equivalent to sending it to one such thread.
937  */
938 static inline int wants_signal(int sig, struct task_struct *p)
939 {
940         if (sigismember(&p->blocked, sig))
941                 return 0;
942         if (p->flags & PF_EXITING)
943                 return 0;
944         if (sig == SIGKILL)
945                 return 1;
946         if (p->state & (TASK_STOPPED | TASK_TRACED))
947                 return 0;
948         return task_curr(p) || !signal_pending(p);
949 }
950
951 static void
952 __group_complete_signal(int sig, struct task_struct *p)
953 {
954         struct task_struct *t;
955
956         /*
957          * Now find a thread we can wake up to take the signal off the queue.
958          *
959          * If the main thread wants the signal, it gets first crack.
960          * Probably the least surprising to the average bear.
961          */
962         if (wants_signal(sig, p))
963                 t = p;
964         else if (thread_group_empty(p))
965                 /*
966                  * There is just one thread and it does not need to be woken.
967                  * It will dequeue unblocked signals before it runs again.
968                  */
969                 return;
970         else {
971                 /*
972                  * Otherwise try to find a suitable thread.
973                  */
974                 t = p->signal->curr_target;
975                 if (t == NULL)
976                         /* restart balancing at this thread */
977                         t = p->signal->curr_target = p;
978                 BUG_ON(t->tgid != p->tgid);
979
980                 while (!wants_signal(sig, t)) {
981                         t = next_thread(t);
982                         if (t == p->signal->curr_target)
983                                 /*
984                                  * No thread needs to be woken.
985                                  * Any eligible threads will see
986                                  * the signal in the queue soon.
987                                  */
988                                 return;
989                 }
990                 p->signal->curr_target = t;
991         }
992
993         /*
994          * Found a killable thread.  If the signal will be fatal,
995          * then start taking the whole group down immediately.
996          */
997         if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
998             !sigismember(&t->real_blocked, sig) &&
999             (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1000                 /*
1001                  * This signal will be fatal to the whole group.
1002                  */
1003                 if (!sig_kernel_coredump(sig)) {
1004                         /*
1005                          * Start a group exit and wake everybody up.
1006                          * This way we don't have other threads
1007                          * running and doing things after a slower
1008                          * thread has the fatal signal pending.
1009                          */
1010                         p->signal->flags = SIGNAL_GROUP_EXIT;
1011                         p->signal->group_exit_code = sig;
1012                         p->signal->group_stop_count = 0;
1013                         t = p;
1014                         do {
1015                                 sigaddset(&t->pending.signal, SIGKILL);
1016                                 signal_wake_up(t, 1);
1017                                 t = next_thread(t);
1018                         } while (t != p);
1019                         return;
1020                 }
1021
1022                 /*
1023                  * There will be a core dump.  We make all threads other
1024                  * than the chosen one go into a group stop so that nothing
1025                  * happens until it gets scheduled, takes the signal off
1026                  * the shared queue, and does the core dump.  This is a
1027                  * little more complicated than strictly necessary, but it
1028                  * keeps the signal state that winds up in the core dump
1029                  * unchanged from the death state, e.g. which thread had
1030                  * the core-dump signal unblocked.
1031                  */
1032                 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1033                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1034                 p->signal->group_stop_count = 0;
1035                 p->signal->group_exit_task = t;
1036                 t = p;
1037                 do {
1038                         p->signal->group_stop_count++;
1039                         signal_wake_up(t, 0);
1040                         t = next_thread(t);
1041                 } while (t != p);
1042                 wake_up_process(p->signal->group_exit_task);
1043                 return;
1044         }
1045
1046         /*
1047          * The signal is already in the shared-pending queue.
1048          * Tell the chosen thread to wake up and dequeue it.
1049          */
1050         signal_wake_up(t, sig == SIGKILL);
1051         return;
1052 }
1053
1054 int
1055 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1056 {
1057         int ret = 0;
1058
1059         assert_spin_locked(&p->sighand->siglock);
1060         handle_stop_signal(sig, p);
1061
1062         /* Short-circuit ignored signals.  */
1063         if (sig_ignored(p, sig))
1064                 return ret;
1065
1066         if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1067                 /* This is a non-RT signal and we already have one queued.  */
1068                 return ret;
1069
1070         /*
1071          * Put this signal on the shared-pending queue, or fail with EAGAIN.
1072          * We always use the shared queue for process-wide signals,
1073          * to avoid several races.
1074          */
1075         ret = send_signal(sig, info, p, &p->signal->shared_pending);
1076         if (unlikely(ret))
1077                 return ret;
1078
1079         __group_complete_signal(sig, p);
1080         return 0;
1081 }
1082
1083 /*
1084  * Nuke all other threads in the group.
1085  */
1086 void zap_other_threads(struct task_struct *p)
1087 {
1088         struct task_struct *t;
1089
1090         p->signal->flags = SIGNAL_GROUP_EXIT;
1091         p->signal->group_stop_count = 0;
1092
1093         if (thread_group_empty(p))
1094                 return;
1095
1096         for (t = next_thread(p); t != p; t = next_thread(t)) {
1097                 /*
1098                  * Don't bother with already dead threads
1099                  */
1100                 if (t->exit_state)
1101                         continue;
1102
1103                 /*
1104                  * We don't want to notify the parent, since we are
1105                  * killed as part of a thread group due to another
1106                  * thread doing an execve() or similar. So set the
1107                  * exit signal to -1 to allow immediate reaping of
1108                  * the process.  But don't detach the thread group
1109                  * leader.
1110                  */
1111                 if (t != p->group_leader)
1112                         t->exit_signal = -1;
1113
1114                 /* SIGKILL will be handled before any pending SIGSTOP */
1115                 sigaddset(&t->pending.signal, SIGKILL);
1116                 signal_wake_up(t, 1);
1117         }
1118 }
1119
1120 /*
1121  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1122  */
1123 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1124 {
1125         struct sighand_struct *sighand;
1126
1127         for (;;) {
1128                 sighand = rcu_dereference(tsk->sighand);
1129                 if (unlikely(sighand == NULL))
1130                         break;
1131
1132                 spin_lock_irqsave(&sighand->siglock, *flags);
1133                 if (likely(sighand == tsk->sighand))
1134                         break;
1135                 spin_unlock_irqrestore(&sighand->siglock, *flags);
1136         }
1137
1138         return sighand;
1139 }
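
/*
 * Editorial sketch (not part of the original file): lock_task_sighand()
 * pairs with unlock_task_sighand(), the spin_unlock_irqrestore() wrapper
 * used below, so callers no longer open-code the retry loop.  The typical
 * pattern, as in group_send_sig_info():
 *
 *	unsigned long flags;
 *
 *	rcu_read_lock();
 *	if (lock_task_sighand(p, &flags)) {
 *		... ->sighand is pinned here, siglock held, irqs off ...
 *		unlock_task_sighand(p, &flags);
 *	}
 *	rcu_read_unlock();
 */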
1140
1141 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1142 {
1143         unsigned long flags;
1144         int ret;
1145
1146         ret = check_kill_permission(sig, info, p);
1147
1148         if (!ret && sig) {
1149                 ret = -ESRCH;
1150                 if (lock_task_sighand(p, &flags)) {
1151                         ret = __group_send_sig_info(sig, info, p);
1152                         unlock_task_sighand(p, &flags);
1153                 }
1154         }
1155
1156         return ret;
1157 }
1158
1159 /*
1160  * kill_pg_info() sends a signal to a process group: this is what the tty
1161  * control characters do (^C, ^Z etc)
1162  */
1163
1164 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1165 {
1166         struct task_struct *p = NULL;
1167         int retval, success;
1168
1169         if (pgrp <= 0)
1170                 return -EINVAL;
1171
1172         success = 0;
1173         retval = -ESRCH;
1174         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1175                 int err = group_send_sig_info(sig, info, p);
1176                 success |= !err;
1177                 retval = err;
1178         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1179         return success ? 0 : retval;
1180 }
1181
1182 int
1183 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1184 {
1185         int retval;
1186
1187         read_lock(&tasklist_lock);
1188         retval = __kill_pg_info(sig, info, pgrp);
1189         read_unlock(&tasklist_lock);
1190
1191         return retval;
1192 }
1193
1194 int
1195 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1196 {
1197         int error;
1198         int acquired_tasklist_lock = 0;
1199         struct task_struct *p;
1200
1201         rcu_read_lock();
1202         if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
1203                 read_lock(&tasklist_lock);
1204                 acquired_tasklist_lock = 1;
1205         }
1206         p = find_task_by_pid(pid);
1207         error = -ESRCH;
1208         if (p)
1209                 error = group_send_sig_info(sig, info, p);
1210         if (unlikely(acquired_tasklist_lock))
1211                 read_unlock(&tasklist_lock);
1212         rcu_read_unlock();
1213         return error;
1214 }
1215
1216 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1217 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1218                       uid_t uid, uid_t euid)
1219 {
1220         int ret = -EINVAL;
1221         struct task_struct *p;
1222
1223         if (!valid_signal(sig))
1224                 return ret;
1225
1226         read_lock(&tasklist_lock);
1227         p = find_task_by_pid(pid);
1228         if (!p) {
1229                 ret = -ESRCH;
1230                 goto out_unlock;
1231         }
1232         if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1233             && (euid != p->suid) && (euid != p->uid)
1234             && (uid != p->suid) && (uid != p->uid)) {
1235                 ret = -EPERM;
1236                 goto out_unlock;
1237         }
1238         if (sig && p->sighand) {
1239                 unsigned long flags;
1240                 spin_lock_irqsave(&p->sighand->siglock, flags);
1241                 ret = __group_send_sig_info(sig, info, p);
1242                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1243         }
1244 out_unlock:
1245         read_unlock(&tasklist_lock);
1246         return ret;
1247 }
1248 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1249
1250 /*
1251  * kill_something_info() interprets pid in interesting ways just like kill(2).
1252  *
1253  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1254  * is probably wrong.  Should make it like BSD or SYSV.
1255  */
1256
1257 static int kill_something_info(int sig, struct siginfo *info, int pid)
1258 {
1259         if (!pid) {
1260                 return kill_pg_info(sig, info, process_group(current));
1261         } else if (pid == -1) {
1262                 int retval = 0, count = 0;
1263                 struct task_struct * p;
1264
1265                 read_lock(&tasklist_lock);
1266                 for_each_process(p) {
1267                         if (p->pid > 1 && p->tgid != current->tgid) {
1268                                 int err = group_send_sig_info(sig, info, p);
1269                                 ++count;
1270                                 if (err != -EPERM)
1271                                         retval = err;
1272                         }
1273                 }
1274                 read_unlock(&tasklist_lock);
1275                 return count ? retval : -ESRCH;
1276         } else if (pid < 0) {
1277                 return kill_pg_info(sig, info, -pid);
1278         } else {
1279                 return kill_proc_info(sig, info, pid);
1280         }
1281 }
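
/*
 * Editorial note (not part of the original file): the pid cases above are
 * the kill(2) conventions as seen from userspace:
 *
 *	kill(1234, SIGTERM);	// pid > 0:   that process (thread group)
 *	kill(0, SIGTERM);	// pid == 0:  caller's process group
 *	kill(-5678, SIGTERM);	// pid < -1:  process group 5678
 *	kill(-1, SIGTERM);	// pid == -1: broadcast, see comment above
 */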
1282
1283 /*
1284  * These are for backward compatibility with the rest of the kernel source.
1285  */
1286
1287 /*
1288  * These two are the most common entry points.  They send a signal
1289  * just to the specific thread.
1290  */
1291 int
1292 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1293 {
1294         int ret;
1295         unsigned long flags;
1296
1297         /*
1298          * Make sure legacy kernel users don't send in bad values
1299          * (normal paths check this in check_kill_permission).
1300          */
1301         if (!valid_signal(sig))
1302                 return -EINVAL;
1303
1304         /*
1305          * We need the tasklist lock even for the specific
1306          * thread case (when we don't need to follow the group
1307          * lists) in order to avoid races with "p->sighand"
1308          * going away or changing from under us.
1309          */
1310         read_lock(&tasklist_lock);  
1311         spin_lock_irqsave(&p->sighand->siglock, flags);
1312         ret = specific_send_sig_info(sig, info, p);
1313         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1314         read_unlock(&tasklist_lock);
1315         return ret;
1316 }
1317
1318 #define __si_special(priv) \
1319         ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1320
1321 int
1322 send_sig(int sig, struct task_struct *p, int priv)
1323 {
1324         return send_sig_info(sig, __si_special(priv), p);
1325 }
1326
1327 /*
1328  * This is the entry point for "process-wide" signals.
1329  * They will go to an appropriate thread in the thread group.
1330  */
1331 int
1332 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1333 {
1334         int ret;
1335         read_lock(&tasklist_lock);
1336         ret = group_send_sig_info(sig, info, p);
1337         read_unlock(&tasklist_lock);
1338         return ret;
1339 }
1340
1341 void
1342 force_sig(int sig, struct task_struct *p)
1343 {
1344         force_sig_info(sig, SEND_SIG_PRIV, p);
1345 }
1346
1347 /*
1348  * When things go south during signal handling, we
1349  * will force a SIGSEGV. And if the signal that caused
1350  * the problem was already a SIGSEGV, we'll want to
1351  * make sure we don't even try to deliver the signal..
1352  */
1353 int
1354 force_sigsegv(int sig, struct task_struct *p)
1355 {
1356         if (sig == SIGSEGV) {
1357                 unsigned long flags;
1358                 spin_lock_irqsave(&p->sighand->siglock, flags);
1359                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1360                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1361         }
1362         force_sig(SIGSEGV, p);
1363         return 0;
1364 }
1365
1366 int
1367 kill_pg(pid_t pgrp, int sig, int priv)
1368 {
1369         return kill_pg_info(sig, __si_special(priv), pgrp);
1370 }
1371
1372 int
1373 kill_proc(pid_t pid, int sig, int priv)
1374 {
1375         return kill_proc_info(sig, __si_special(priv), pid);
1376 }
1377
1378 /*
1379  * These functions support sending signals using preallocated sigqueue
1380  * structures.  This is needed "because realtime applications cannot
1381  * afford to lose notifications of asynchronous events, like timer
1382  * expirations or I/O completions".  In the case of Posix Timers 
1383  * we allocate the sigqueue structure from the timer_create.  If this
1384  * allocation fails we are able to report the failure to the application
1385  * with an EAGAIN error.
1386  */
1387  
1388 struct sigqueue *sigqueue_alloc(void)
1389 {
1390         struct sigqueue *q;
1391
1392         if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1393                 q->flags |= SIGQUEUE_PREALLOC;
1394         return(q);
1395 }
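
/*
 * Editorial sketch (not part of the original file): the preallocation above
 * is done on behalf of timer_create(2), so a queueing failure is reported
 * there as EAGAIN rather than when the timer later expires.  Userspace side,
 * assuming SIGEV_SIGNAL notification (link with -lrt on older glibc):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = { 0 };
 *		timer_t timerid;
 *
 *		sev.sigev_notify = SIGEV_SIGNAL;
 *		sev.sigev_signo  = SIGRTMIN;
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1)
 *			perror("timer_create");	// EAGAIN: preallocation failed
 *		return 0;
 *	}
 */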
1396
1397 void sigqueue_free(struct sigqueue *q)
1398 {
1399         unsigned long flags;
1400         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1401         /*
1402          * If the signal is still pending remove it from the
1403          * pending queue.
1404          */
1405         if (unlikely(!list_empty(&q->list))) {
1406                 spinlock_t *lock = &current->sighand->siglock;
1407                 read_lock(&tasklist_lock);
1408                 spin_lock_irqsave(lock, flags);
1409                 if (!list_empty(&q->list))
1410                         list_del_init(&q->list);
1411                 spin_unlock_irqrestore(lock, flags);
1412                 read_unlock(&tasklist_lock);
1413         }
1414         q->flags &= ~SIGQUEUE_PREALLOC;
1415         __sigqueue_free(q);
1416 }
1417
1418 int
1419 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1420 {
1421         unsigned long flags;
1422         int ret = 0;
1423         struct sighand_struct *sh;
1424
1425         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1426
1427         /*
1428          * The rcu based delayed sighand destroy makes it possible to
1429          * run this without tasklist lock held. The task struct itself
1430          * cannot go away as create_timer did get_task_struct().
1431          *
1432          * We return -1 when the task is marked exiting, so that
1433          * posix_timer_event can redirect it to the group leader
1434          */
1435         rcu_read_lock();
1436
1437         if (unlikely(p->flags & PF_EXITING)) {
1438                 ret = -1;
1439                 goto out_err;
1440         }
1441
1442 retry:
1443         sh = rcu_dereference(p->sighand);
1444
1445         spin_lock_irqsave(&sh->siglock, flags);
1446         if (p->sighand != sh) {
1447                 /* We raced with exec() in a multithreaded process... */
1448                 spin_unlock_irqrestore(&sh->siglock, flags);
1449                 goto retry;
1450         }
1451
1452         /*
1453          * We do the check here again to handle the following scenario:
1454          *
1455          * CPU 0                CPU 1
1456          * send_sigqueue
1457          * check PF_EXITING
1458          * interrupt            exit code running
1459          *                      __exit_signal
1460          *                      lock sighand->siglock
1461          *                      unlock sighand->siglock
1462          * lock sh->siglock
1463          * add(tsk->pending)    flush_sigqueue(tsk->pending)
1464          *
1465          */
1466
1467         if (unlikely(p->flags & PF_EXITING)) {
1468                 ret = -1;
1469                 goto out;
1470         }
1471
1472         if (unlikely(!list_empty(&q->list))) {
1473                 /*
1474                  * If an SI_TIMER entry is already queued, just increment
1475                  * the overrun count.
1476                  */
1477                 if (q->info.si_code != SI_TIMER)
1478                         BUG();
1479                 q->info.si_overrun++;
1480                 goto out;
1481         }
1482         /* Short-circuit ignored signals.  */
1483         if (sig_ignored(p, sig)) {
1484                 ret = 1;
1485                 goto out;
1486         }
1487
1488         list_add_tail(&q->list, &p->pending.list);
1489         sigaddset(&p->pending.signal, sig);
1490         if (!sigismember(&p->blocked, sig))
1491                 signal_wake_up(p, sig == SIGKILL);
1492
1493 out:
1494         spin_unlock_irqrestore(&sh->siglock, flags);
1495 out_err:
1496         rcu_read_unlock();
1497
1498         return ret;
1499 }
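/*
 * Hedged sketch (editorial): the retry loop above is the pattern a
 * lock_task_sighand()-style helper can factor out -- re-check p->sighand
 * after taking the lock, because exec() in another thread may have switched
 * the sighand_struct underneath us.  The helper actually introduced by this
 * patch may differ in detail; this illustration assumes the caller holds
 * rcu_read_lock() and later drops the lock with spin_unlock_irqrestore().
 */
#if 0
static struct sighand_struct *example_lock_task_sighand(struct task_struct *tsk,
							 unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;			/* task is past __exit_signal() */

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;			/* still current: return locked */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
#endif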
1500
1501 int
1502 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1503 {
1504         unsigned long flags;
1505         int ret = 0;
1506
1507         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1508
1509         read_lock(&tasklist_lock);
1510         /* Since it_lock is held, p->sighand cannot be NULL. */
1511         spin_lock_irqsave(&p->sighand->siglock, flags);
1512         handle_stop_signal(sig, p);
1513
1514         /* Short-circuit ignored signals.  */
1515         if (sig_ignored(p, sig)) {
1516                 ret = 1;
1517                 goto out;
1518         }
1519
1520         if (unlikely(!list_empty(&q->list))) {
1521                 /*
1522                  * If an SI_TIMER entry is already queued, just increment
1523                  * the overrun count.  Other uses should not try to
1524                  * send the signal multiple times.
1525                  */
1526                 if (q->info.si_code != SI_TIMER)
1527                         BUG();
1528                 q->info.si_overrun++;
1529                 goto out;
1530         } 
1531
1532         /*
1533          * Put this signal on the shared-pending queue.
1534          * We always use the shared queue for process-wide signals,
1535          * to avoid several races.
1536          */
1537         list_add_tail(&q->list, &p->signal->shared_pending.list);
1538         sigaddset(&p->signal->shared_pending.signal, sig);
1539
1540         __group_complete_signal(sig, p);
1541 out:
1542         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1543         read_unlock(&tasklist_lock);
1544         return ret;
1545 }
1546
1547 /*
1548  * Wake up any threads in the parent blocked in wait* syscalls.
1549  */
1550 static inline void __wake_up_parent(struct task_struct *p,
1551                                     struct task_struct *parent)
1552 {
1553         wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1554 }
1555
1556 /*
1557  * Let a parent know about the death of a child.
1558  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1559  */
1560
1561 void do_notify_parent(struct task_struct *tsk, int sig)
1562 {
1563         struct siginfo info;
1564         unsigned long flags;
1565         struct sighand_struct *psig;
1566
1567         BUG_ON(sig == -1);
1568
1569         /* do_notify_parent_cldstop should have been called instead.  */
1570         BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1571
1572         BUG_ON(!tsk->ptrace &&
1573                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1574
1575         info.si_signo = sig;
1576         info.si_errno = 0;
1577         info.si_pid = tsk->pid;
1578         info.si_uid = tsk->uid;
1579
1580         /* FIXME: find out whether or not this is supposed to be c*time. */
1581         info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1582                                                        tsk->signal->utime));
1583         info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1584                                                        tsk->signal->stime));
1585
1586         info.si_status = tsk->exit_code & 0x7f;
1587         if (tsk->exit_code & 0x80)
1588                 info.si_code = CLD_DUMPED;
1589         else if (tsk->exit_code & 0x7f)
1590                 info.si_code = CLD_KILLED;
1591         else {
1592                 info.si_code = CLD_EXITED;
1593                 info.si_status = tsk->exit_code >> 8;
1594         }
1595
1596         psig = tsk->parent->sighand;
1597         spin_lock_irqsave(&psig->siglock, flags);
1598         if (!tsk->ptrace && sig == SIGCHLD &&
1599             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1600              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1601                 /*
1602                  * We are exiting and our parent doesn't care.  POSIX.1
1603                  * defines special semantics for setting SIGCHLD to SIG_IGN
1604                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1605                  * automatically and not left for our parent's wait4 call.
1606                  * Rather than having the parent do it as a magic kind of
1607                  * signal handler, we just set this to tell do_exit that we
1608                  * can be cleaned up without becoming a zombie.  Note that
1609                  * we still call __wake_up_parent in this case, because a
1610                  * blocked sys_wait4 might now return -ECHILD.
1611                  *
1612                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1613                  * is implementation-defined: we do (if you don't want
1614                  * it, just use SIG_IGN instead).
1615                  */
1616                 tsk->exit_signal = -1;
1617                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1618                         sig = 0;
1619         }
1620         if (valid_signal(sig) && sig > 0)
1621                 __group_send_sig_info(sig, &info, tsk->parent);
1622         __wake_up_parent(tsk, tsk->parent);
1623         spin_unlock_irqrestore(&psig->siglock, flags);
1624 }
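/*
 * Hedged userspace illustration (editorial) of the SIG_IGN/SA_NOCLDWAIT
 * semantics handled above: with either setting, exiting children are reaped
 * automatically and a blocked wait4() in the parent returns -ECHILD.
 */
#if 0
	#include <signal.h>

	struct sigaction sa;

	sa.sa_handler = SIG_DFL;	/* or SIG_IGN; both trigger autoreap */
	sa.sa_flags = SA_NOCLDWAIT;	/* children do not become zombies */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);
#endif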
1625
1626 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1627 {
1628         struct siginfo info;
1629         unsigned long flags;
1630         struct task_struct *parent;
1631         struct sighand_struct *sighand;
1632
1633         if (to_self)
1634                 parent = tsk->parent;
1635         else {
1636                 tsk = tsk->group_leader;
1637                 parent = tsk->real_parent;
1638         }
1639
1640         info.si_signo = SIGCHLD;
1641         info.si_errno = 0;
1642         info.si_pid = tsk->pid;
1643         info.si_uid = tsk->uid;
1644
1645         /* FIXME: find out whether or not this is supposed to be c*time. */
1646         info.si_utime = cputime_to_jiffies(tsk->utime);
1647         info.si_stime = cputime_to_jiffies(tsk->stime);
1648
1649         info.si_code = why;
1650         switch (why) {
1651         case CLD_CONTINUED:
1652                 info.si_status = SIGCONT;
1653                 break;
1654         case CLD_STOPPED:
1655                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1656                 break;
1657         case CLD_TRAPPED:
1658                 info.si_status = tsk->exit_code & 0x7f;
1659                 break;
1660         default:
1661                 BUG();
1662         }
1663
1664         sighand = parent->sighand;
1665         spin_lock_irqsave(&sighand->siglock, flags);
1666         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1667             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1668                 __group_send_sig_info(SIGCHLD, &info, parent);
1669         /*
1670          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1671          */
1672         __wake_up_parent(tsk, parent);
1673         spin_unlock_irqrestore(&sighand->siglock, flags);
1674 }
1675
1676 /*
1677  * This must be called with current->sighand->siglock held.
1678  *
1679  * This should be the path for all ptrace stops.
1680  * We always set current->last_siginfo while stopped here.
1681  * That makes it a way to test a stopped process for
1682  * being ptrace-stopped vs being job-control-stopped.
1683  *
1684  * If we actually decide not to stop at all because the tracer is gone,
1685  * we leave nostop_code in current->exit_code.
1686  */
1687 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1688 {
1689         /*
1690          * If there is a group stop in progress,
1691          * we must participate in the bookkeeping.
1692          */
1693         if (current->signal->group_stop_count > 0)
1694                 --current->signal->group_stop_count;
1695
1696         current->last_siginfo = info;
1697         current->exit_code = exit_code;
1698
1699         /* Let the debugger run.  */
1700         set_current_state(TASK_TRACED);
1701         spin_unlock_irq(&current->sighand->siglock);
1702         read_lock(&tasklist_lock);
1703         if (likely(current->ptrace & PT_PTRACED) &&
1704             likely(current->parent != current->real_parent ||
1705                    !(current->ptrace & PT_ATTACHED)) &&
1706             (likely(current->parent->signal != current->signal) ||
1707              !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1708                 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1709                 read_unlock(&tasklist_lock);
1710                 schedule();
1711         } else {
1712                 /*
1713                  * By the time we got the lock, our tracer went away.
1714                  * Don't stop here.
1715                  */
1716                 read_unlock(&tasklist_lock);
1717                 set_current_state(TASK_RUNNING);
1718                 current->exit_code = nostop_code;
1719         }
1720
1721         /*
1722          * We are back.  Now reacquire the siglock before touching
1723          * last_siginfo, so that we are sure to have synchronized with
1724          * any signal-sending on another CPU that wants to examine it.
1725          */
1726         spin_lock_irq(&current->sighand->siglock);
1727         current->last_siginfo = NULL;
1728
1729         /*
1730          * Queued signals ignored us while we were stopped for tracing.
1731          * So check for any that we should take before resuming user mode.
1732          */
1733         recalc_sigpending();
1734 }
1735
1736 void ptrace_notify(int exit_code)
1737 {
1738         siginfo_t info;
1739
1740         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1741
1742         memset(&info, 0, sizeof info);
1743         info.si_signo = SIGTRAP;
1744         info.si_code = exit_code;
1745         info.si_pid = current->pid;
1746         info.si_uid = current->uid;
1747
1748         /* Let the debugger run.  */
1749         spin_lock_irq(&current->sighand->siglock);
1750         ptrace_stop(exit_code, 0, &info);
1751         spin_unlock_irq(&current->sighand->siglock);
1752 }
1753
1754 static void
1755 finish_stop(int stop_count)
1756 {
1757         int to_self;
1758
1759         /*
1760          * If there are no other threads in the group, or if there is
1761          * a group stop in progress and we are the last to stop,
1762          * report to the parent.  When ptraced, every thread reports itself.
1763          */
1764         if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1765                 to_self = 1;
1766         else if (stop_count == 0)
1767                 to_self = 0;
1768         else
1769                 goto out;
1770
1771         read_lock(&tasklist_lock);
1772         do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1773         read_unlock(&tasklist_lock);
1774
1775 out:
1776         schedule();
1777         /*
1778          * Now we don't run again until continued.
1779          */
1780         current->exit_code = 0;
1781 }
1782
1783 /*
1784  * This performs the stopping for SIGSTOP and other stop signals.
1785  * We have to stop all threads in the thread group.
1786  * Returns nonzero if we've actually stopped and released the siglock.
1787  * Returns zero if we didn't stop and still hold the siglock.
1788  */
1789 static int
1790 do_signal_stop(int signr)
1791 {
1792         struct signal_struct *sig = current->signal;
1793         struct sighand_struct *sighand = current->sighand;
1794         int stop_count = -1;
1795
1796         if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1797                 return 0;
1798
1799         if (sig->group_stop_count > 0) {
1800                 /*
1801                  * There is a group stop in progress.  We don't need to
1802                  * start another one.
1803                  */
1804                 signr = sig->group_exit_code;
1805                 stop_count = --sig->group_stop_count;
1806                 current->exit_code = signr;
1807                 set_current_state(TASK_STOPPED);
1808                 if (stop_count == 0)
1809                         sig->flags = SIGNAL_STOP_STOPPED;
1810                 spin_unlock_irq(&sighand->siglock);
1811         }
1812         else if (thread_group_empty(current)) {
1813                 /*
1814                  * Lock must be held through transition to stopped state.
1815                  */
1816                 current->exit_code = current->signal->group_exit_code = signr;
1817                 set_current_state(TASK_STOPPED);
1818                 sig->flags = SIGNAL_STOP_STOPPED;
1819                 spin_unlock_irq(&sighand->siglock);
1820         }
1821         else {
1822                 /*
1823                  * There is no group stop already in progress.
1824                  * We must initiate one now, but that requires
1825                  * dropping siglock to get both the tasklist lock
1826                  * and siglock again in the proper order.  Note that
1827                  * this allows an intervening SIGCONT to be posted.
1828                  * We need to check for that and bail out if necessary.
1829                  */
1830                 struct task_struct *t;
1831
1832                 spin_unlock_irq(&sighand->siglock);
1833
1834                 /* signals can be posted during this window */
1835
1836                 read_lock(&tasklist_lock);
1837                 spin_lock_irq(&sighand->siglock);
1838
1839                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1840                         /*
1841                          * Another stop or continue happened while we
1842                          * didn't have the lock.  We can just swallow this
1843                          * signal now.  If we raced with a SIGCONT, that
1844                          * should have just cleared it now.  If we raced
1845                          * with another processor delivering a stop signal,
1846                          * then the SIGCONT that wakes us up should clear it.
1847                          */
1848                         read_unlock(&tasklist_lock);
1849                         return 0;
1850                 }
1851
1852                 if (sig->group_stop_count == 0) {
1853                         sig->group_exit_code = signr;
1854                         stop_count = 0;
1855                         for (t = next_thread(current); t != current;
1856                              t = next_thread(t))
1857                                 /*
1858                                  * Setting state to TASK_STOPPED for a group
1859                                  * stop is always done with the siglock held,
1860                                  * so this check has no races.
1861                                  */
1862                                 if (!t->exit_state &&
1863                                     !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1864                                         stop_count++;
1865                                         signal_wake_up(t, 0);
1866                                 }
1867                         sig->group_stop_count = stop_count;
1868                 }
1869                 else {
1870                         /* A race with another thread while unlocked.  */
1871                         signr = sig->group_exit_code;
1872                         stop_count = --sig->group_stop_count;
1873                 }
1874
1875                 current->exit_code = signr;
1876                 set_current_state(TASK_STOPPED);
1877                 if (stop_count == 0)
1878                         sig->flags = SIGNAL_STOP_STOPPED;
1879
1880                 spin_unlock_irq(&sighand->siglock);
1881                 read_unlock(&tasklist_lock);
1882         }
1883
1884         finish_stop(stop_count);
1885         return 1;
1886 }
1887
1888 /*
1889  * Do appropriate magic when group_stop_count > 0.
1890  * We return nonzero if we stopped, after releasing the siglock.
1891  * We return zero if we still hold the siglock and should look
1892  * for another signal without checking group_stop_count again.
1893  */
1894 static int handle_group_stop(void)
1895 {
1896         int stop_count;
1897
1898         if (current->signal->group_exit_task == current) {
1899                 /*
1900                  * Group stop is so we can do a core dump;
1901                  * we are the initiating thread, so get on with it.
1902                  */
1903                 current->signal->group_exit_task = NULL;
1904                 return 0;
1905         }
1906
1907         if (current->signal->flags & SIGNAL_GROUP_EXIT)
1908                 /*
1909                  * Group stop is so another thread can do a core dump,
1910                  * or else we are racing against a death signal.
1911                  * Just punt the stop so we can get the next signal.
1912                  */
1913                 return 0;
1914
1915         /*
1916          * There is a group stop in progress.  We stop
1917          * without any associated signal being in our queue.
1918          */
1919         stop_count = --current->signal->group_stop_count;
1920         if (stop_count == 0)
1921                 current->signal->flags = SIGNAL_STOP_STOPPED;
1922         current->exit_code = current->signal->group_exit_code;
1923         set_current_state(TASK_STOPPED);
1924         spin_unlock_irq(&current->sighand->siglock);
1925         finish_stop(stop_count);
1926         return 1;
1927 }
1928
1929 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1930                           struct pt_regs *regs, void *cookie)
1931 {
1932         sigset_t *mask = &current->blocked;
1933         int signr = 0;
1934
1935         try_to_freeze();
1936
1937 relock:
1938         spin_lock_irq(&current->sighand->siglock);
1939         for (;;) {
1940                 struct k_sigaction *ka;
1941
1942                 if (unlikely(current->signal->group_stop_count > 0) &&
1943                     handle_group_stop())
1944                         goto relock;
1945
1946                 signr = dequeue_signal(current, mask, info);
1947
1948                 if (!signr)
1949                         break; /* will return 0 */
1950
1951                 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1952                         ptrace_signal_deliver(regs, cookie);
1953
1954                         /* Let the debugger run.  */
1955                         ptrace_stop(signr, signr, info);
1956
1957                         /* We're back.  Did the debugger cancel the signal, or has a group exit started? */
1958                         signr = current->exit_code;
1959                         if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1960                                 continue;
1961
1962                         current->exit_code = 0;
1963
1964                         /* Update the siginfo structure if the signal has
1965                            changed.  If the debugger wanted something
1966                            specific in the siginfo structure then it should
1967                            have updated *info via PTRACE_SETSIGINFO.  */
1968                         if (signr != info->si_signo) {
1969                                 info->si_signo = signr;
1970                                 info->si_errno = 0;
1971                                 info->si_code = SI_USER;
1972                                 info->si_pid = current->parent->pid;
1973                                 info->si_uid = current->parent->uid;
1974                         }
1975
1976                         /* If the (new) signal is now blocked, requeue it.  */
1977                         if (sigismember(&current->blocked, signr)) {
1978                                 specific_send_sig_info(signr, info, current);
1979                                 continue;
1980                         }
1981                 }
1982
1983                 ka = &current->sighand->action[signr-1];
1984                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1985                         continue;
1986                 if (ka->sa.sa_handler != SIG_DFL) {
1987                         /* Run the handler.  */
1988                         *return_ka = *ka;
1989
1990                         if (ka->sa.sa_flags & SA_ONESHOT)
1991                                 ka->sa.sa_handler = SIG_DFL;
1992
1993                         break; /* will return non-zero "signr" value */
1994                 }
1995
1996                 /*
1997                  * Now we are doing the default action for this signal.
1998                  */
1999                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2000                         continue;
2001
2002                 /* Init gets no signals it doesn't want.  */
2003                 if (current == child_reaper)
2004                         continue;
2005
2006                 if (sig_kernel_stop(signr)) {
2007                         /*
2008                          * The default action is to stop all threads in
2009                          * the thread group.  The job control signals
2010                          * do nothing in an orphaned pgrp, but SIGSTOP
2011                          * always works.  Note that siglock needs to be
2012                          * dropped during the call to is_orphaned_pgrp()
2013                          * because of lock ordering with tasklist_lock.
2014                          * This allows an intervening SIGCONT to be posted.
2015                          * We need to check for that and bail out if necessary.
2016                          */
2017                         if (signr != SIGSTOP) {
2018                                 spin_unlock_irq(&current->sighand->siglock);
2019
2020                                 /* signals can be posted during this window */
2021
2022                                 if (is_orphaned_pgrp(process_group(current)))
2023                                         goto relock;
2024
2025                                 spin_lock_irq(&current->sighand->siglock);
2026                         }
2027
2028                         if (likely(do_signal_stop(signr))) {
2029                                 /* It released the siglock.  */
2030                                 goto relock;
2031                         }
2032
2033                         /*
2034                          * We didn't actually stop, due to a race
2035                          * with SIGCONT or something like that.
2036                          */
2037                         continue;
2038                 }
2039
2040                 spin_unlock_irq(&current->sighand->siglock);
2041
2042                 /*
2043                  * Anything else is fatal, maybe with a core dump.
2044                  */
2045                 current->flags |= PF_SIGNALED;
2046                 if (sig_kernel_coredump(signr)) {
2047                         /*
2048                          * If it was able to dump core, this kills all
2049                          * other threads in the group and synchronizes with
2050                          * their demise.  If we lost the race with another
2051                          * thread getting here, it set group_exit_code
2052                          * first and our do_group_exit call below will use
2053                          * that value and ignore the one we pass it.
2054                          */
2055                         do_coredump((long)signr, signr, regs);
2056                 }
2057
2058                 /*
2059                  * Death signals, no core dump.
2060                  */
2061                 do_group_exit(signr);
2062                 /* NOTREACHED */
2063         }
2064         spin_unlock_irq(&current->sighand->siglock);
2065         return signr;
2066 }
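/*
 * Hedged userspace illustration (editorial) of the ptrace interaction above:
 * when a traced child stops in ptrace_stop(), the tracer sees the signal via
 * waitpid() and chooses, through the restart request, which signal (if any)
 * the child actually receives -- that choice is what get_signal_to_deliver()
 * reads back from current->exit_code.  "child" is a placeholder PID.
 */
#if 0
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <signal.h>

	int status;

	waitpid(child, &status, 0);		/* child stopped on a signal */
	if (WIFSTOPPED(status)) {
		int sig = WSTOPSIG(status);
		if (sig == SIGUSR1)
			sig = 0;		/* cancel the signal entirely */
		ptrace(PTRACE_CONT, child, 0, sig);	/* or deliver sig */
	}
#endif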
2067
2068 EXPORT_SYMBOL(recalc_sigpending);
2069 EXPORT_SYMBOL_GPL(dequeue_signal);
2070 EXPORT_SYMBOL(flush_signals);
2071 EXPORT_SYMBOL(force_sig);
2072 EXPORT_SYMBOL(kill_pg);
2073 EXPORT_SYMBOL(kill_proc);
2074 EXPORT_SYMBOL(ptrace_notify);
2075 EXPORT_SYMBOL(send_sig);
2076 EXPORT_SYMBOL(send_sig_info);
2077 EXPORT_SYMBOL(sigprocmask);
2078 EXPORT_SYMBOL(block_all_signals);
2079 EXPORT_SYMBOL(unblock_all_signals);
2080
2081
2082 /*
2083  * System call entry points.
2084  */
2085
2086 asmlinkage long sys_restart_syscall(void)
2087 {
2088         struct restart_block *restart = &current_thread_info()->restart_block;
2089         return restart->fn(restart);
2090 }
2091
2092 long do_no_restart_syscall(struct restart_block *param)
2093 {
2094         return -EINTR;
2095 }
2096
2097 /*
2098  * We don't need to get the kernel lock - this is all local to this
2099  * particular thread (and that's good, because this is _heavily_
2100  * used by various programs).
2101  */
2102
2103 /*
2104  * This is also useful for kernel threads that want to temporarily
2105  * (or permanently) block certain signals.
2106  *
2107  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2108  * interface happily blocks "unblockable" signals like SIGKILL
2109  * and friends.
2110  */
2111 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2112 {
2113         int error;
2114
2115         spin_lock_irq(&current->sighand->siglock);
2116         if (oldset)
2117                 *oldset = current->blocked;
2118
2119         error = 0;
2120         switch (how) {
2121         case SIG_BLOCK:
2122                 sigorsets(&current->blocked, &current->blocked, set);
2123                 break;
2124         case SIG_UNBLOCK:
2125                 signandsets(&current->blocked, &current->blocked, set);
2126                 break;
2127         case SIG_SETMASK:
2128                 current->blocked = *set;
2129                 break;
2130         default:
2131                 error = -EINVAL;
2132         }
2133         recalc_sigpending();
2134         spin_unlock_irq(&current->sighand->siglock);
2135
2136         return error;
2137 }
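/*
 * Hedged illustration (editorial): a kernel thread using the in-kernel
 * sigprocmask() above to block everything, including SIGKILL and SIGSTOP,
 * which the user-mode interface would refuse to block.
 */
#if 0
	sigset_t all;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, NULL);	/* blocks "unblockable" signals too */
#endif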
2138
2139 asmlinkage long
2140 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2141 {
2142         int error = -EINVAL;
2143         sigset_t old_set, new_set;
2144
2145         /* XXX: Don't preclude handling different sized sigset_t's.  */
2146         if (sigsetsize != sizeof(sigset_t))
2147                 goto out;
2148
2149         if (set) {
2150                 error = -EFAULT;
2151                 if (copy_from_user(&new_set, set, sizeof(*set)))
2152                         goto out;
2153                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2154
2155                 error = sigprocmask(how, &new_set, &old_set);
2156                 if (error)
2157                         goto out;
2158                 if (oset)
2159                         goto set_old;
2160         } else if (oset) {
2161                 spin_lock_irq(&current->sighand->siglock);
2162                 old_set = current->blocked;
2163                 spin_unlock_irq(&current->sighand->siglock);
2164
2165         set_old:
2166                 error = -EFAULT;
2167                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2168                         goto out;
2169         }
2170         error = 0;
2171 out:
2172         return error;
2173 }
2174
2175 long do_sigpending(void __user *set, unsigned long sigsetsize)
2176 {
2177         long error = -EINVAL;
2178         sigset_t pending;
2179
2180         if (sigsetsize > sizeof(sigset_t))
2181                 goto out;
2182
2183         spin_lock_irq(&current->sighand->siglock);
2184         sigorsets(&pending, &current->pending.signal,
2185                   &current->signal->shared_pending.signal);
2186         spin_unlock_irq(&current->sighand->siglock);
2187
2188         /* Outside the lock because only this thread touches it.  */
2189         sigandsets(&pending, &current->blocked, &pending);
2190
2191         error = -EFAULT;
2192         if (!copy_to_user(set, &pending, sigsetsize))
2193                 error = 0;
2194
2195 out:
2196         return error;
2197 }       
2198
2199 asmlinkage long
2200 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2201 {
2202         return do_sigpending(set, sigsetsize);
2203 }
2204
2205 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2206
2207 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2208 {
2209         int err;
2210
2211         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2212                 return -EFAULT;
2213         if (from->si_code < 0)
2214                 return __copy_to_user(to, from, sizeof(siginfo_t))
2215                         ? -EFAULT : 0;
2216         /*
2217          * If you change siginfo_t structure, please be sure
2218          * this code is fixed accordingly.
2219          * It should never copy any pad contained in the structure
2220          * to avoid security leaks, but must copy the generic
2221          * 3 ints plus the relevant union member.
2222          */
2223         err = __put_user(from->si_signo, &to->si_signo);
2224         err |= __put_user(from->si_errno, &to->si_errno);
2225         err |= __put_user((short)from->si_code, &to->si_code);
2226         switch (from->si_code & __SI_MASK) {
2227         case __SI_KILL:
2228                 err |= __put_user(from->si_pid, &to->si_pid);
2229                 err |= __put_user(from->si_uid, &to->si_uid);
2230                 break;
2231         case __SI_TIMER:
2232                  err |= __put_user(from->si_tid, &to->si_tid);
2233                  err |= __put_user(from->si_overrun, &to->si_overrun);
2234                  err |= __put_user(from->si_ptr, &to->si_ptr);
2235                 break;
2236         case __SI_POLL:
2237                 err |= __put_user(from->si_band, &to->si_band);
2238                 err |= __put_user(from->si_fd, &to->si_fd);
2239                 break;
2240         case __SI_FAULT:
2241                 err |= __put_user(from->si_addr, &to->si_addr);
2242 #ifdef __ARCH_SI_TRAPNO
2243                 err |= __put_user(from->si_trapno, &to->si_trapno);
2244 #endif
2245                 break;
2246         case __SI_CHLD:
2247                 err |= __put_user(from->si_pid, &to->si_pid);
2248                 err |= __put_user(from->si_uid, &to->si_uid);
2249                 err |= __put_user(from->si_status, &to->si_status);
2250                 err |= __put_user(from->si_utime, &to->si_utime);
2251                 err |= __put_user(from->si_stime, &to->si_stime);
2252                 break;
2253         case __SI_RT: /* This is not generated by the kernel as of now. */
2254         case __SI_MESGQ: /* But this is */
2255                 err |= __put_user(from->si_pid, &to->si_pid);
2256                 err |= __put_user(from->si_uid, &to->si_uid);
2257                 err |= __put_user(from->si_ptr, &to->si_ptr);
2258                 break;
2259         default: /* this is just in case for now ... */
2260                 err |= __put_user(from->si_pid, &to->si_pid);
2261                 err |= __put_user(from->si_uid, &to->si_uid);
2262                 break;
2263         }
2264         return err;
2265 }
2266
2267 #endif
2268
2269 asmlinkage long
2270 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2271                     siginfo_t __user *uinfo,
2272                     const struct timespec __user *uts,
2273                     size_t sigsetsize)
2274 {
2275         int ret, sig;
2276         sigset_t these;
2277         struct timespec ts;
2278         siginfo_t info;
2279         long timeout = 0;
2280
2281         /* XXX: Don't preclude handling different sized sigset_t's.  */
2282         if (sigsetsize != sizeof(sigset_t))
2283                 return -EINVAL;
2284
2285         if (copy_from_user(&these, uthese, sizeof(these)))
2286                 return -EFAULT;
2287                 
2288         /*
2289          * Invert the set of allowed signals to get those we
2290          * want to block.
2291          */
2292         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2293         signotset(&these);
2294
2295         if (uts) {
2296                 if (copy_from_user(&ts, uts, sizeof(ts)))
2297                         return -EFAULT;
2298                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2299                     || ts.tv_sec < 0)
2300                         return -EINVAL;
2301         }
2302
2303         spin_lock_irq(&current->sighand->siglock);
2304         sig = dequeue_signal(current, &these, &info);
2305         if (!sig) {
2306                 timeout = MAX_SCHEDULE_TIMEOUT;
2307                 if (uts)
2308                         timeout = (timespec_to_jiffies(&ts)
2309                                    + (ts.tv_sec || ts.tv_nsec));
2310
2311                 if (timeout) {
2312                         /* None ready -- temporarily unblock those we're
2313                          * interested in while we are sleeping, so that we'll
2314                          * be awakened when they arrive.  */
2315                         current->real_blocked = current->blocked;
2316                         sigandsets(&current->blocked, &current->blocked, &these);
2317                         recalc_sigpending();
2318                         spin_unlock_irq(&current->sighand->siglock);
2319
2320                         timeout = schedule_timeout_interruptible(timeout);
2321
2322                         spin_lock_irq(&current->sighand->siglock);
2323                         sig = dequeue_signal(current, &these, &info);
2324                         current->blocked = current->real_blocked;
2325                         siginitset(&current->real_blocked, 0);
2326                         recalc_sigpending();
2327                 }
2328         }
2329         spin_unlock_irq(&current->sighand->siglock);
2330
2331         if (sig) {
2332                 ret = sig;
2333                 if (uinfo) {
2334                         if (copy_siginfo_to_user(uinfo, &info))
2335                                 ret = -EFAULT;
2336                 }
2337         } else {
2338                 ret = -EAGAIN;
2339                 if (timeout)
2340                         ret = -EINTR;
2341         }
2342
2343         return ret;
2344 }
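/*
 * Hedged userspace illustration (editorial) of the syscall above: block the
 * signals you intend to accept (so they stay queued instead of being
 * delivered asynchronously), then collect them synchronously with a timeout.
 */
#if 0
	#include <signal.h>
	#include <time.h>

	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	int sig = sigtimedwait(&set, &si, &ts);	/* -1 with errno EAGAIN on timeout */
#endif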
2345
2346 asmlinkage long
2347 sys_kill(int pid, int sig)
2348 {
2349         struct siginfo info;
2350
2351         info.si_signo = sig;
2352         info.si_errno = 0;
2353         info.si_code = SI_USER;
2354         info.si_pid = current->tgid;
2355         info.si_uid = current->uid;
2356
2357         return kill_something_info(sig, &info, pid);
2358 }
2359
2360 static int do_tkill(int tgid, int pid, int sig)
2361 {
2362         int error;
2363         struct siginfo info;
2364         struct task_struct *p;
2365
2366         error = -ESRCH;
2367         info.si_signo = sig;
2368         info.si_errno = 0;
2369         info.si_code = SI_TKILL;
2370         info.si_pid = current->tgid;
2371         info.si_uid = current->uid;
2372
2373         read_lock(&tasklist_lock);
2374         p = find_task_by_pid(pid);
2375         if (p && (tgid <= 0 || p->tgid == tgid)) {
2376                 error = check_kill_permission(sig, &info, p);
2377                 /*
2378                  * The null signal is a permissions and process existence
2379                  * probe.  No signal is actually delivered.
2380                  */
2381                 if (!error && sig && p->sighand) {
2382                         spin_lock_irq(&p->sighand->siglock);
2383                         handle_stop_signal(sig, p);
2384                         error = specific_send_sig_info(sig, &info, p);
2385                         spin_unlock_irq(&p->sighand->siglock);
2386                 }
2387         }
2388         read_unlock(&tasklist_lock);
2389
2390         return error;
2391 }
2392
2393 /**
2394  *  sys_tgkill - send signal to one specific thread
2395  *  @tgid: the thread group ID of the thread
2396  *  @pid: the PID of the thread
2397  *  @sig: signal to be sent
2398  *
2399  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2400  *  exists but no longer belongs to the target process. This
2401  *  method solves the problem of threads exiting and PIDs getting reused.
2402  */
2403 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2404 {
2405         /* This is only valid for single tasks */
2406         if (pid <= 0 || tgid <= 0)
2407                 return -EINVAL;
2408
2409         return do_tkill(tgid, pid, sig);
2410 }
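/*
 * Hedged userspace illustration (editorial): tgkill() pairs the thread ID
 * with its thread group ID, so a TID recycled into another process cannot be
 * signalled by mistake.  With no C library wrapper, syscall() is used here.
 */
#if 0
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <signal.h>

	pid_t tgid = getpid();			/* thread group (process) ID */
	pid_t tid = syscall(SYS_gettid);	/* this thread's kernel TID */

	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
#endif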
2411
2412 /*
2413  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2414  */
2415 asmlinkage long
2416 sys_tkill(int pid, int sig)
2417 {
2418         /* This is only valid for single tasks */
2419         if (pid <= 0)
2420                 return -EINVAL;
2421
2422         return do_tkill(0, pid, sig);
2423 }
2424
2425 asmlinkage long
2426 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2427 {
2428         siginfo_t info;
2429
2430         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2431                 return -EFAULT;
2432
2433         /* Not even root can pretend to send signals from the kernel.
2434            Nor can they impersonate a kill(), which adds source info.  */
2435         if (info.si_code >= 0)
2436                 return -EPERM;
2437         info.si_signo = sig;
2438
2439         /* POSIX.1b doesn't mention process groups.  */
2440         return kill_proc_info(sig, &info, pid);
2441 }
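/*
 * Hedged userspace illustration (editorial): sigqueue() is the usual client
 * of rt_sigqueueinfo.  The C library fills in a negative si_code (SI_QUEUE),
 * which is why the si_code >= 0 check above never rejects it.  "target_pid"
 * is a placeholder.
 */
#if 0
	#include <signal.h>

	union sigval value;

	value.sival_int = 42;			/* payload delivered in si_value */
	sigqueue(target_pid, SIGUSR1, value);
#endif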
2442
2443 int
2444 do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2445 {
2446         struct k_sigaction *k;
2447         sigset_t mask;
2448
2449         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2450                 return -EINVAL;
2451
2452         k = &current->sighand->action[sig-1];
2453
2454         spin_lock_irq(&current->sighand->siglock);
2455         if (signal_pending(current)) {
2456                 /*
2457                  * If there might be a fatal signal pending on multiple
2458                  * threads, make sure we take it before changing the action.
2459                  */
2460                 spin_unlock_irq(&current->sighand->siglock);
2461                 return -ERESTARTNOINTR;
2462         }
2463
2464         if (oact)
2465                 *oact = *k;
2466
2467         if (act) {
2468                 sigdelsetmask(&act->sa.sa_mask,
2469                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2470                 /*
2471                  * POSIX 3.3.1.3:
2472                  *  "Setting a signal action to SIG_IGN for a signal that is
2473                  *   pending shall cause the pending signal to be discarded,
2474                  *   whether or not it is blocked."
2475                  *
2476                  *  "Setting a signal action to SIG_DFL for a signal that is
2477                  *   pending and whose default action is to ignore the signal
2478                  *   (for example, SIGCHLD), shall cause the pending signal to
2479                  *   be discarded, whether or not it is blocked"
2480                  */
2481                 if (act->sa.sa_handler == SIG_IGN ||
2482                     (act->sa.sa_handler == SIG_DFL &&
2483                      sig_kernel_ignore(sig))) {
2484                         /*
2485                          * This is a fairly rare case, so we only take the
2486                          * tasklist_lock once we're sure we'll need it.
2487                          * Now we must do this little unlock and relock
2488                          * dance to maintain the lock hierarchy.
2489                          */
2490                         struct task_struct *t = current;
2491                         spin_unlock_irq(&t->sighand->siglock);
2492                         read_lock(&tasklist_lock);
2493                         spin_lock_irq(&t->sighand->siglock);
2494                         *k = *act;
2495                         sigemptyset(&mask);
2496                         sigaddset(&mask, sig);
2497                         rm_from_queue_full(&mask, &t->signal->shared_pending);
2498                         do {
2499                                 rm_from_queue_full(&mask, &t->pending);
2500                                 recalc_sigpending_tsk(t);
2501                                 t = next_thread(t);
2502                         } while (t != current);
2503                         spin_unlock_irq(&current->sighand->siglock);
2504                         read_unlock(&tasklist_lock);
2505                         return 0;
2506                 }
2507
2508                 *k = *act;
2509         }
2510
2511         spin_unlock_irq(&current->sighand->siglock);
2512         return 0;
2513 }
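/*
 * Hedged userspace illustration (editorial) of the POSIX 3.3.1.3 rule quoted
 * above: a blocked, pending signal is discarded the moment its action is set
 * to SIG_IGN, so it no longer shows up in sigpending().
 */
#if 0
	#include <signal.h>

	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);				/* now pending and blocked */

	signal(SIGUSR1, SIG_IGN);		/* discards the pending signal */
	sigpending(&pending);			/* SIGUSR1 is no longer a member */
#endif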
2514
2515 int 
2516 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2517 {
2518         stack_t oss;
2519         int error;
2520
2521         if (uoss) {
2522                 oss.ss_sp = (void __user *) current->sas_ss_sp;
2523                 oss.ss_size = current->sas_ss_size;
2524                 oss.ss_flags = sas_ss_flags(sp);
2525         }
2526
2527         if (uss) {
2528                 void __user *ss_sp;
2529                 size_t ss_size;
2530                 int ss_flags;
2531
2532                 error = -EFAULT;
2533                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2534                     || __get_user(ss_sp, &uss->ss_sp)
2535                     || __get_user(ss_flags, &uss->ss_flags)
2536                     || __get_user(ss_size, &uss->ss_size))
2537                         goto out;
2538
2539                 error = -EPERM;
2540                 if (on_sig_stack(sp))
2541                         goto out;
2542
2543                 error = -EINVAL;
2544                 /*
2545                  *
2546                  * Note - this code used to test ss_flags incorrectly;
2547                  *        old code may have been written using ss_flags==0
2548                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2549                  *        way that worked) - this fix preserves that older
2550                  *        mechanism.
2551                  */
2552                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2553                         goto out;
2554
2555                 if (ss_flags == SS_DISABLE) {
2556                         ss_size = 0;
2557                         ss_sp = NULL;
2558                 } else {
2559                         error = -ENOMEM;
2560                         if (ss_size < MINSIGSTKSZ)
2561                                 goto out;
2562                 }
2563
2564                 current->sas_ss_sp = (unsigned long) ss_sp;
2565                 current->sas_ss_size = ss_size;
2566         }
2567
2568         if (uoss) {
2569                 error = -EFAULT;
2570                 if (copy_to_user(uoss, &oss, sizeof(oss)))
2571                         goto out;
2572         }
2573
2574         error = 0;
2575 out:
2576         return error;
2577 }
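/*
 * Hedged userspace illustration (editorial) of the interface implemented
 * above: install an alternate stack, then ask for a handler to run on it
 * with SA_ONSTACK -- the usual way to survive a stack-overflow SIGSEGV.
 * "segv_handler" is a placeholder handler.
 */
#if 0
	#include <signal.h>
	#include <stdlib.h>

	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	sa.sa_handler = segv_handler;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
#endif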
2578
2579 #ifdef __ARCH_WANT_SYS_SIGPENDING
2580
2581 asmlinkage long
2582 sys_sigpending(old_sigset_t __user *set)
2583 {
2584         return do_sigpending(set, sizeof(*set));
2585 }
2586
2587 #endif
2588
2589 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2590 /* Some platforms have their own version with special arguments; others
2591    support only sys_rt_sigprocmask.  */
2592
2593 asmlinkage long
2594 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2595 {
2596         int error;
2597         old_sigset_t old_set, new_set;
2598
2599         if (set) {
2600                 error = -EFAULT;
2601                 if (copy_from_user(&new_set, set, sizeof(*set)))
2602                         goto out;
2603                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2604
2605                 spin_lock_irq(&current->sighand->siglock);
2606                 old_set = current->blocked.sig[0];
2607
2608                 error = 0;
2609                 switch (how) {
2610                 default:
2611                         error = -EINVAL;
2612                         break;
2613                 case SIG_BLOCK:
2614                         sigaddsetmask(&current->blocked, new_set);
2615                         break;
2616                 case SIG_UNBLOCK:
2617                         sigdelsetmask(&current->blocked, new_set);
2618                         break;
2619                 case SIG_SETMASK:
2620                         current->blocked.sig[0] = new_set;
2621                         break;
2622                 }
2623
2624                 recalc_sigpending();
2625                 spin_unlock_irq(&current->sighand->siglock);
2626                 if (error)
2627                         goto out;
2628                 if (oset)
2629                         goto set_old;
2630         } else if (oset) {
2631                 old_set = current->blocked.sig[0];
2632         set_old:
2633                 error = -EFAULT;
2634                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2635                         goto out;
2636         }
2637         error = 0;
2638 out:
2639         return error;
2640 }
2641 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2642
2643 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2644 asmlinkage long
2645 sys_rt_sigaction(int sig,
2646                  const struct sigaction __user *act,
2647                  struct sigaction __user *oact,
2648                  size_t sigsetsize)
2649 {
2650         struct k_sigaction new_sa, old_sa;
2651         int ret = -EINVAL;
2652
2653         /* XXX: Don't preclude handling different sized sigset_t's.  */
2654         if (sigsetsize != sizeof(sigset_t))
2655                 goto out;
2656
2657         if (act) {
2658                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2659                         return -EFAULT;
2660         }
2661
2662         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2663
2664         if (!ret && oact) {
2665                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2666                         return -EFAULT;
2667         }
2668 out:
2669         return ret;
2670 }
2671 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2672
2673 #ifdef __ARCH_WANT_SYS_SGETMASK
2674
2675 /*
2676  * For backwards compatibility.  Functionality superseded by sigprocmask.
2677  */
2678 asmlinkage long
2679 sys_sgetmask(void)
2680 {
2681         /* SMP safe */
2682         return current->blocked.sig[0];
2683 }
2684
2685 asmlinkage long
2686 sys_ssetmask(int newmask)
2687 {
2688         int old;
2689
2690         spin_lock_irq(&current->sighand->siglock);
2691         old = current->blocked.sig[0];
2692
2693         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2694                                                   sigmask(SIGSTOP)));
2695         recalc_sigpending();
2696         spin_unlock_irq(&current->sighand->siglock);
2697
2698         return old;
2699 }
2700 #endif /* __ARCH_WANT_SYS_SGETMASK */
2701
2702 #ifdef __ARCH_WANT_SYS_SIGNAL
2703 /*
2704  * For backwards compatibility.  Functionality superseded by sigaction.
2705  */
2706 asmlinkage unsigned long
2707 sys_signal(int sig, __sighandler_t handler)
2708 {
2709         struct k_sigaction new_sa, old_sa;
2710         int ret;
2711
2712         new_sa.sa.sa_handler = handler;
2713         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2714         sigemptyset(&new_sa.sa.sa_mask);
2715
2716         ret = do_sigaction(sig, &new_sa, &old_sa);
2717
2718         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2719 }
2720 #endif /* __ARCH_WANT_SYS_SIGNAL */
2721
2722 #ifdef __ARCH_WANT_SYS_PAUSE
2723
2724 asmlinkage long
2725 sys_pause(void)
2726 {
2727         current->state = TASK_INTERRUPTIBLE;
2728         schedule();
2729         return -ERESTARTNOHAND;
2730 }
2731
2732 #endif
2733
2734 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2735 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2736 {
2737         sigset_t newset;
2738
2739         /* XXX: Don't preclude handling different sized sigset_t's.  */
2740         if (sigsetsize != sizeof(sigset_t))
2741                 return -EINVAL;
2742
2743         if (copy_from_user(&newset, unewset, sizeof(newset)))
2744                 return -EFAULT;
2745         sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2746
2747         spin_lock_irq(&current->sighand->siglock);
2748         current->saved_sigmask = current->blocked;
2749         current->blocked = newset;
2750         recalc_sigpending();
2751         spin_unlock_irq(&current->sighand->siglock);
2752
2753         current->state = TASK_INTERRUPTIBLE;
2754         schedule();
2755         set_thread_flag(TIF_RESTORE_SIGMASK);
2756         return -ERESTARTNOHAND;
2757 }
2758 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
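/*
 * Hedged userspace illustration (editorial) of what the saved_sigmask dance
 * above provides: sigsuspend() atomically installs a temporary mask and
 * sleeps, closing the window between testing a flag and waiting for the
 * signal that sets it.  "flag" is assumed to be set by the SIGUSR1 handler.
 */
#if 0
	#include <signal.h>

	volatile sig_atomic_t flag;

	sigset_t block, orig;
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);	/* keep SIGUSR1 queued */

	while (!flag)
		sigsuspend(&orig);		/* unblock + wait, atomically */

	sigprocmask(SIG_SETMASK, &orig, NULL);
#endif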
2759
2760 void __init signals_init(void)
2761 {
2762         sigqueue_cachep =
2763                 kmem_cache_create("sigqueue",
2764                                   sizeof(struct sigqueue),
2765                                   __alignof__(struct sigqueue),
2766                                   SLAB_PANIC, NULL, NULL);
2767 }