[PATCH] md: Change case of raid level reported in sys/mdX/md/level
[safe/jmp/linux-2.6] / kernel / signal.c
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <asm/param.h>
29 #include <asm/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/siginfo.h>
32
/*
 * SLAB caches for signal bits.
 */

/* Allocator for the struct sigqueue entries that carry queued siginfo. */
static kmem_cache_t *sigqueue_cachep;

39 /*
40  * In POSIX a signal is sent either to a specific thread (Linux task)
41  * or to the process as a whole (Linux thread group).  How the signal
42  * is sent determines whether it's to one thread or the whole group,
43  * which determines which signal mask(s) are involved in blocking it
44  * from being delivered until later.  When the signal is delivered,
45  * either it's caught or ignored by a user handler or it has a default
46  * effect that applies to the whole thread group (POSIX process).
47  *
48  * The possible effects an unblocked signal set to SIG_DFL can have are:
49  *   ignore     - Nothing Happens
50  *   terminate  - kill the process, i.e. all threads in the group,
51  *                similar to exit_group.  The group leader (only) reports
52  *                WIFSIGNALED status to its parent.
53  *   coredump   - write a core dump file describing all threads using
54  *                the same mm and then kill all those threads
55  *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
56  *
57  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
59  * The job control signals also have other special effects.
60  *
61  *      +--------------------+------------------+
62  *      |  POSIX signal      |  default action  |
63  *      +--------------------+------------------+
64  *      |  SIGHUP            |  terminate       |
65  *      |  SIGINT            |  terminate       |
66  *      |  SIGQUIT           |  coredump        |
67  *      |  SIGILL            |  coredump        |
68  *      |  SIGTRAP           |  coredump        |
69  *      |  SIGABRT/SIGIOT    |  coredump        |
70  *      |  SIGBUS            |  coredump        |
71  *      |  SIGFPE            |  coredump        |
72  *      |  SIGKILL           |  terminate(+)    |
73  *      |  SIGUSR1           |  terminate       |
74  *      |  SIGSEGV           |  coredump        |
75  *      |  SIGUSR2           |  terminate       |
76  *      |  SIGPIPE           |  terminate       |
77  *      |  SIGALRM           |  terminate       |
78  *      |  SIGTERM           |  terminate       |
79  *      |  SIGCHLD           |  ignore          |
80  *      |  SIGCONT           |  ignore(*)       |
81  *      |  SIGSTOP           |  stop(*)(+)      |
82  *      |  SIGTSTP           |  stop(*)         |
83  *      |  SIGTTIN           |  stop(*)         |
84  *      |  SIGTTOU           |  stop(*)         |
85  *      |  SIGURG            |  ignore          |
86  *      |  SIGXCPU           |  coredump        |
87  *      |  SIGXFSZ           |  coredump        |
88  *      |  SIGVTALRM         |  terminate       |
89  *      |  SIGPROF           |  terminate       |
90  *      |  SIGPOLL/SIGIO     |  terminate       |
91  *      |  SIGSYS/SIGUNUSED  |  coredump        |
92  *      |  SIGSTKFLT         |  terminate       |
93  *      |  SIGWINCH          |  ignore          |
94  *      |  SIGPWR            |  terminate       |
95  *      |  SIGRTMIN-SIGRTMAX |  terminate       |
96  *      +--------------------+------------------+
97  *      |  non-POSIX signal  |  default action  |
98  *      +--------------------+------------------+
99  *      |  SIGEMT            |  coredump        |
100  *      +--------------------+------------------+
101  *
102  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103  * (*) Special job control effects:
104  * When SIGCONT is sent, it resumes the process (all threads in the group)
105  * from TASK_STOPPED state and also clears any pending/queued stop signals
106  * (any of those marked with "stop(*)").  This happens regardless of blocking,
107  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
108  * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
110  * default action of stopping the process may happen later or never.
111  */
112
/*
 * M(sig) builds a one-bit mask for signal number 'sig' (signals are
 * 1-based).  A 64-bit constant is used when the real-time signals do
 * not fit in a single long.  T(sig, mask) tests membership of 'sig'
 * in 'mask'.  M_SIGEMT expands to 0 on architectures without SIGEMT
 * so it can be OR'd into masks unconditionally.
 */
#ifdef SIGEMT
#define M_SIGEMT        M(SIGEMT)
#else
#define M_SIGEMT        0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
125
/* Signals that can never be caught, blocked, or ignored. */
#define SIG_KERNEL_ONLY_MASK (\
        M(SIGKILL)   |  M(SIGSTOP)                                   )

/* Signals whose default action stops the whole thread group. */
#define SIG_KERNEL_STOP_MASK (\
        M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

/* Signals whose default action dumps core and kills the group. */
#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

/* Signals whose default action is to be discarded. */
#define SIG_KERNEL_IGNORE_MASK (\
        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

/* All the sig_kernel_*() predicates are false for real-time signals. */
#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

/* True when task t has installed a real handler (neither DFL nor IGN). */
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

/*
 * True when signr's default action is terminate/coredump (i.e. not
 * ignore and not stop) and t has left its handler at SIG_DFL.
 */
#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
156
157 static int sig_ignored(struct task_struct *t, int sig)
158 {
159         void __user * handler;
160
161         /*
162          * Tracers always want to know about signals..
163          */
164         if (t->ptrace & PT_PTRACED)
165                 return 0;
166
167         /*
168          * Blocked signals are never ignored, since the
169          * signal handler may change by the time it is
170          * unblocked.
171          */
172         if (sigismember(&t->blocked, sig))
173                 return 0;
174
175         /* Is it explicitly or implicitly ignored? */
176         handler = t->sighand->action[sig-1].sa.sa_handler;
177         return   handler == SIG_IGN ||
178                 (handler == SIG_DFL && sig_kernel_ignore(sig));
179 }
180
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	/*
	 * The common word counts (1, 2, 4) are unrolled; the default
	 * arm handles any other _NSIG_WORDS value with a loop.  A
	 * signal is deliverable when its bit is set in *signal but
	 * clear in *blocked.
	 */
	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
210
/* Any unblocked signal pending in set p? */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Recompute t's TIF_SIGPENDING flag from all sources that should make
 * it break out into signal handling: an in-progress group stop, the
 * freezer wanting the task (freezing(t) makes it look signal-pending
 * so it notices promptly), or a deliverable private/shared signal.
 */
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}
223
/* Convenience wrapper: recompute TIF_SIGPENDING for the current task. */
void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
228
/* Given the mask, find the first available signal that should be serviced. */

/*
 * Return the lowest-numbered signal that is set in pending->signal
 * but not in *mask, or 0 if there is none.  Lower-numbered signals
 * are serviced first.
 */
static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	/* Unrolled for the common 1- and 2-word layouts. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				/* ffz(~x) = index of lowest set bit in x. */
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
264
265 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
266                                          int override_rlimit)
267 {
268         struct sigqueue *q = NULL;
269
270         atomic_inc(&t->user->sigpending);
271         if (override_rlimit ||
272             atomic_read(&t->user->sigpending) <=
273                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
274                 q = kmem_cache_alloc(sigqueue_cachep, flags);
275         if (unlikely(q == NULL)) {
276                 atomic_dec(&t->user->sigpending);
277         } else {
278                 INIT_LIST_HEAD(&q->list);
279                 q->flags = 0;
280                 q->user = get_uid(t->user);
281         }
282         return(q);
283 }
284
/*
 * Release one sigqueue entry: drop the owner's pending count and uid
 * reference, then return it to the cache.  Preallocated entries
 * (SIGQUEUE_PREALLOC) are owned by their timer and must not be freed
 * here.
 */
static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
293
294 static void flush_sigqueue(struct sigpending *queue)
295 {
296         struct sigqueue *q;
297
298         sigemptyset(&queue->signal);
299         while (!list_empty(&queue->list)) {
300                 q = list_entry(queue->list.next, struct sigqueue , list);
301                 list_del_init(&q->list);
302                 __sigqueue_free(q);
303         }
304 }
305
/*
 * Flush all pending signals for a task.
 */

/*
 * Drop every private and shared pending signal for @t and clear its
 * TIF_SIGPENDING flag, all under the siglock so no signal can be
 * queued concurrently.
 */
void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
321
/*
 * This function expects the tasklist_lock write-locked.
 */

/*
 * Detach @tsk from its sighand_struct and free it when this was the
 * last reference (sighand structs are shared by CLONE_SIGHAND).
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}
334
/* Locked wrapper: detach the sighand under the tasklist write lock. */
void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
341
342 /*
343  * This function expects the tasklist_lock write-locked.
344  */
345 void __exit_signal(struct task_struct *tsk)
346 {
347         struct signal_struct * sig = tsk->signal;
348         struct sighand_struct * sighand = tsk->sighand;
349
350         if (!sig)
351                 BUG();
352         if (!atomic_read(&sig->count))
353                 BUG();
354         spin_lock(&sighand->siglock);
355         posix_cpu_timers_exit(tsk);
356         if (atomic_dec_and_test(&sig->count)) {
357                 posix_cpu_timers_exit_group(tsk);
358                 if (tsk == sig->curr_target)
359                         sig->curr_target = next_thread(tsk);
360                 tsk->signal = NULL;
361                 spin_unlock(&sighand->siglock);
362                 flush_sigqueue(&sig->shared_pending);
363         } else {
364                 /*
365                  * If there is any task waiting for the group exit
366                  * then notify it:
367                  */
368                 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
369                         wake_up_process(sig->group_exit_task);
370                         sig->group_exit_task = NULL;
371                 }
372                 if (tsk == sig->curr_target)
373                         sig->curr_target = next_thread(tsk);
374                 tsk->signal = NULL;
375                 /*
376                  * Accumulate here the counters for all threads but the
377                  * group leader as they die, so they can be added into
378                  * the process-wide totals when those are taken.
379                  * The group leader stays around as a zombie as long
380                  * as there are other threads.  When it gets reaped,
381                  * the exit.c code will add its counts into these totals.
382                  * We won't ever get here for the group leader, since it
383                  * will have been the last reference on the signal_struct.
384                  */
385                 sig->utime = cputime_add(sig->utime, tsk->utime);
386                 sig->stime = cputime_add(sig->stime, tsk->stime);
387                 sig->min_flt += tsk->min_flt;
388                 sig->maj_flt += tsk->maj_flt;
389                 sig->nvcsw += tsk->nvcsw;
390                 sig->nivcsw += tsk->nivcsw;
391                 sig->sched_time += tsk->sched_time;
392                 spin_unlock(&sighand->siglock);
393                 sig = NULL;     /* Marker for below.  */
394         }
395         clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
396         flush_sigqueue(&tsk->pending);
397         if (sig) {
398                 /*
399                  * We are cleaning up the signal_struct here.
400                  */
401                 exit_thread_group_keys(sig);
402                 kmem_cache_free(signal_cachep, sig);
403         }
404 }
405
/*
 * Locked wrapper around __exit_signal(); also drops the live-thread
 * count used by the group-exit code.
 */
void exit_signal(struct task_struct *tsk)
{
	atomic_dec(&tsk->signal->live);

	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
414
415 /*
416  * Flush all handlers for a task.
417  */
418
419 void
420 flush_signal_handlers(struct task_struct *t, int force_default)
421 {
422         int i;
423         struct k_sigaction *ka = &t->sighand->action[0];
424         for (i = _NSIG ; i != 0 ; i--) {
425                 if (force_default || ka->sa.sa_handler != SIG_IGN)
426                         ka->sa.sa_handler = SIG_DFL;
427                 ka->sa.sa_flags = 0;
428                 sigemptyset(&ka->sa.sa_mask);
429                 ka++;
430         }
431 }
432
433
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

/*
 * Install the notifier under the siglock so __dequeue_signal() sees a
 * consistent notifier/mask/data triple.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
453
/* Notify the system that blocking has ended. */

/*
 * Remove the driver notifier and recompute pending state, since
 * signals the notifier was suppressing may now be deliverable.
 * NOTE(review): notifier_mask is left stale here; it is only consulted
 * after a non-NULL notifier check, so that appears harmless — confirm.
 */
void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
467
/*
 * Remove one instance of @sig from @list and fill in @info for it.
 * Returns 0 if the signal was not pending at all, 1 otherwise.  The
 * pending bit is cleared only when no further queued instance of the
 * same signal remains.
 */
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				/* A second instance exists; keep the bit set. */
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
510
/*
 * Pick the next deliverable signal from @pending (ignoring those in
 * @mask), honouring any driver notifier installed via
 * block_all_signals(), and collect its siginfo into @info.  Returns
 * the signal number, or 0 if nothing was dequeued.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				/* Notifier returning 0 keeps the signal blocked. */
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;

	}
	/* Pending state may have changed; recompute TIF_SIGPENDING. */
	recalc_sigpending();

	return sig;
}
535
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 *
 * Private (per-thread) signals are preferred over shared (per-group)
 * ones.  For POSIX timer signals the siglock is dropped briefly to
 * respect timer-lock ordering while rearming.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
579
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 *
 * @resume: also wake the task out of TASK_STOPPED/TASK_TRACED,
 * used for SIGKILL and SIGCONT-style wakeups.
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	/* If it's running elsewhere, kick it so it sees TIF_SIGPENDING. */
	if (!wake_up_state(t, mask))
		kick_process(t);
}
610
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	/* Safe iteration: entries are deleted while walking the list. */
	list_for_each_entry_safe(q, n, &s->list, list) {
		/* mask covers the first word only, so limit to < SIGRTMIN. */
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
634
/*
 * Bad permissions for sending the signal
 */

/*
 * Permission check for sending @sig to @t.  A user-originated signal
 * is allowed when any of the sender's uid/euid matches the target's
 * uid/suid, when the sender has CAP_KILL, or when the signal is
 * SIGCONT within the same session.  (a ^ b) is nonzero exactly when
 * a != b, so the chain of ^ terms requires that no id pair matched.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;
	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}
658
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     int to_self,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Called with p's siglock held.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			/* Drop the siglock around the parent notification. */
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
771
/*
 * Queue @sig with @info onto @signals (a private or shared pending
 * set) for task @t.  The pending bit is always set; a sigqueue entry
 * carrying the siginfo is attached when one can be allocated.
 * Returns 0 on success or -EAGAIN when a user-sent real-time signal
 * cannot be queued.  Called with the siglock held.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		/* Synthesize siginfo for the special "no info" senders. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	/* Mark the signal pending in the bitmap whether queued or not. */
	sigaddset(&signals->signal, sig);
	return ret;
}
830
/*
 * Nonzero if @sig is a legacy (non-realtime) signal that is already a
 * member of pending set @sigptr — such signals are never queued twice.
 */
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
833
834
835 static int
836 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
837 {
838         int ret = 0;
839
840         if (!irqs_disabled())
841                 BUG();
842         assert_spin_locked(&t->sighand->siglock);
843
844         /* Short-circuit ignored signals.  */
845         if (sig_ignored(t, sig))
846                 goto out;
847
848         /* Support queueing exactly one non-rt signal, so that we
849            can get more detailed information about the cause of
850            the signal. */
851         if (LEGACY_QUEUE(&t->pending, sig))
852                 goto out;
853
854         ret = send_signal(sig, info, t, &t->pending);
855         if (!ret && !sigismember(&t->blocked, sig))
856                 signal_wake_up(t, sig == SIGKILL);
857 out:
858         return ret;
859 }
860
861 /*
862  * Force a signal that the process can't ignore: if necessary
863  * we unblock the signal and change any SIG_IGN to SIG_DFL.
864  */
865
866 int
867 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
868 {
869         unsigned long int flags;
870         int ret;
871
872         spin_lock_irqsave(&t->sighand->siglock, flags);
873         if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
874                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
875         }
876         if (sigismember(&t->blocked, sig)) {
877                 sigdelset(&t->blocked, sig);
878         }
879         recalc_sigpending_tsk(t);
880         ret = specific_send_sig_info(sig, info, t);
881         spin_unlock_irqrestore(&t->sighand->siglock, flags);
882
883         return ret;
884 }
885
/*
 * Force @sig on thread @t via the fast SEND_SIG_FORCED path (no
 * siginfo allocation); unblocks/un-ignores it like force_sig_info().
 */
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
891
892 /*
893  * Test if P wants to take SIG.  After we've checked all threads with this,
894  * it's equivalent to finding no threads not blocking SIG.  Any threads not
895  * blocking SIG were ruled out because they are not running and already
896  * have pending signals.  Such threads will dequeue from the shared queue
897  * as soon as they're available, so putting the signal on the shared queue
898  * will be equivalent to sending it to one such thread.
899  */
900 static inline int wants_signal(int sig, struct task_struct *p)
901 {
902         if (sigismember(&p->blocked, sig))
903                 return 0;
904         if (p->flags & PF_EXITING)
905                 return 0;
906         if (sig == SIGKILL)
907                 return 1;
908         if (p->state & (TASK_STOPPED | TASK_TRACED))
909                 return 0;
910         return task_curr(p) || !signal_pending(p);
911 }
912
/*
 * @sig has just been placed on the group's shared pending queue;
 * choose a thread to take it, wake that thread, and — when the signal
 * is fatal to the group — begin tearing down all threads at once.
 * Caller holds p->sighand->siglock.
 */
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target round-robins target selection across the
		 * group so one thread is not always penalized.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1015
1016 int
1017 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1018 {
1019         int ret = 0;
1020
1021         assert_spin_locked(&p->sighand->siglock);
1022         handle_stop_signal(sig, p);
1023
1024         /* Short-circuit ignored signals.  */
1025         if (sig_ignored(p, sig))
1026                 return ret;
1027
1028         if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1029                 /* This is a non-RT signal and we already have one queued.  */
1030                 return ret;
1031
1032         /*
1033          * Put this signal on the shared-pending queue, or fail with EAGAIN.
1034          * We always use the shared queue for process-wide signals,
1035          * to avoid several races.
1036          */
1037         ret = send_signal(sig, info, p, &p->signal->shared_pending);
1038         if (unlikely(ret))
1039                 return ret;
1040
1041         __group_complete_signal(sig, p);
1042         return 0;
1043 }
1044
1045 /*
1046  * Nuke all other threads in the group.
1047  */
/*
 * SIGKILL every thread in @p's group except @p itself, marking the
 * group as exiting and cancelling any in-progress group stop.
 * NOTE(review): presumably called with p->sighand->siglock held
 * (it manipulates signal state without taking it) — confirm callers.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	/* Mark the whole group as exiting; abandon any group stop. */
	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
1081
1082 /*
1083  * Must be called with the tasklist_lock held for reading!
1084  */
1085 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1086 {
1087         unsigned long flags;
1088         int ret;
1089
1090         ret = check_kill_permission(sig, info, p);
1091         if (!ret && sig && p->sighand) {
1092                 spin_lock_irqsave(&p->sighand->siglock, flags);
1093                 ret = __group_send_sig_info(sig, info, p);
1094                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1095         }
1096
1097         return ret;
1098 }
1099
1100 /*
1101  * kill_pg_info() sends a signal to a process group: this is what the tty
1102  * control characters do (^C, ^Z etc)
1103  */
1104
/*
 * Send @sig to every member of process group @pgrp.  Caller must hold
 * tasklist_lock for reading (see group_send_sig_info()).
 *
 * Returns 0 if at least one member accepted the signal, otherwise the
 * last per-task error — or -ESRCH if the group had no members.
 */
int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1122
/*
 * Locked wrapper: signal process group @pgrp under tasklist_lock.
 * Return semantics as for __kill_pg_info().
 */
int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
1134
1135 int
1136 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1137 {
1138         int error;
1139         struct task_struct *p;
1140
1141         read_lock(&tasklist_lock);
1142         p = find_task_by_pid(pid);
1143         error = -ESRCH;
1144         if (p)
1145                 error = group_send_sig_info(sig, info, p);
1146         read_unlock(&tasklist_lock);
1147         return error;
1148 }
1149
1150 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1151 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1152                       uid_t uid, uid_t euid)
1153 {
1154         int ret = -EINVAL;
1155         struct task_struct *p;
1156
1157         if (!valid_signal(sig))
1158                 return ret;
1159
1160         read_lock(&tasklist_lock);
1161         p = find_task_by_pid(pid);
1162         if (!p) {
1163                 ret = -ESRCH;
1164                 goto out_unlock;
1165         }
1166         if ((!info || ((unsigned long)info != 1 &&
1167                         (unsigned long)info != 2 && SI_FROMUSER(info)))
1168             && (euid != p->suid) && (euid != p->uid)
1169             && (uid != p->suid) && (uid != p->uid)) {
1170                 ret = -EPERM;
1171                 goto out_unlock;
1172         }
1173         if (sig && p->sighand) {
1174                 unsigned long flags;
1175                 spin_lock_irqsave(&p->sighand->siglock, flags);
1176                 ret = __group_send_sig_info(sig, info, p);
1177                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1178         }
1179 out_unlock:
1180         read_unlock(&tasklist_lock);
1181         return ret;
1182 }
1183 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1184
1185 /*
1186  * kill_something_info() interprets pid in interesting ways just like kill(2).
1187  *
1188  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1189  * is probably wrong.  Should make it like BSD or SYSV.
1190  */
1191
1192 static int kill_something_info(int sig, struct siginfo *info, int pid)
1193 {
1194         if (!pid) {
1195                 return kill_pg_info(sig, info, process_group(current));
1196         } else if (pid == -1) {
1197                 int retval = 0, count = 0;
1198                 struct task_struct * p;
1199
1200                 read_lock(&tasklist_lock);
1201                 for_each_process(p) {
1202                         if (p->pid > 1 && p->tgid != current->tgid) {
1203                                 int err = group_send_sig_info(sig, info, p);
1204                                 ++count;
1205                                 if (err != -EPERM)
1206                                         retval = err;
1207                         }
1208                 }
1209                 read_unlock(&tasklist_lock);
1210                 return count ? retval : -ESRCH;
1211         } else if (pid < 0) {
1212                 return kill_pg_info(sig, info, -pid);
1213         } else {
1214                 return kill_proc_info(sig, info, pid);
1215         }
1216 }
1217
1218 /*
1219  * These are for backward compatibility with the rest of the kernel source.
1220  */
1221
1222 /*
1223  * These two are the most common entry points.  They send a signal
1224  * just to the specific thread.
1225  */
1226 int
1227 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1228 {
1229         int ret;
1230         unsigned long flags;
1231
1232         /*
1233          * Make sure legacy kernel users don't send in bad values
1234          * (normal paths check this in check_kill_permission).
1235          */
1236         if (!valid_signal(sig))
1237                 return -EINVAL;
1238
1239         /*
1240          * We need the tasklist lock even for the specific
1241          * thread case (when we don't need to follow the group
1242          * lists) in order to avoid races with "p->sighand"
1243          * going away or changing from under us.
1244          */
1245         read_lock(&tasklist_lock);  
1246         spin_lock_irqsave(&p->sighand->siglock, flags);
1247         ret = specific_send_sig_info(sig, info, p);
1248         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1249         read_unlock(&tasklist_lock);
1250         return ret;
1251 }
1252
/*
 * Map the legacy "priv" flag onto a special siginfo pointer:
 * kernel-internal sender (SEND_SIG_PRIV) vs. on behalf of a user
 * process (SEND_SIG_NOINFO).
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1255
/*
 * Send @sig to the single thread @p; @priv selects kernel-internal
 * (nonzero) vs. user-style (zero) origin — see __si_special().
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1261
1262 /*
1263  * This is the entry point for "process-wide" signals.
1264  * They will go to an appropriate thread in the thread group.
1265  */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	/* tasklist_lock keeps p->sighand stable across the send. */
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
1275
/*
 * Force @sig on thread @p as a kernel-internal signal that cannot be
 * ignored or blocked (see force_sig_info()).
 */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1281
1282 /*
1283  * When things go south during signal handling, we
1284  * will force a SIGSEGV. And if the signal that caused
1285  * the problem was already a SIGSEGV, we'll want to
1286  * make sure we don't even try to deliver the signal..
1287  */
1288 int
1289 force_sigsegv(int sig, struct task_struct *p)
1290 {
1291         if (sig == SIGSEGV) {
1292                 unsigned long flags;
1293                 spin_lock_irqsave(&p->sighand->siglock, flags);
1294                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1295                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1296         }
1297         force_sig(SIGSEGV, p);
1298         return 0;
1299 }
1300
/*
 * Legacy helper: signal process group @pgrp; @priv as in send_sig().
 */
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}
1306
/*
 * Legacy helper: signal process @pid; @priv as in send_sig().
 */
int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
1312
1313 /*
1314  * These functions support sending signals using preallocated sigqueue
1315  * structures.  This is needed "because realtime applications cannot
1316  * afford to lose notifications of asynchronous events, like timer
1317  * expirations or I/O completions".  In the case of Posix Timers 
1318  * we allocate the sigqueue structure from the timer_create.  If this
1319  * allocation fails we are able to report the failure to the application
1320  * with an EAGAIN error.
1321  */
1322  
1323 struct sigqueue *sigqueue_alloc(void)
1324 {
1325         struct sigqueue *q;
1326
1327         if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1328                 q->flags |= SIGQUEUE_PREALLOC;
1329         return(q);
1330 }
1331
/*
 * Free a preallocated sigqueue entry.  If it is still on a pending
 * list it is unqueued first under the siglock; tasklist_lock keeps
 * the sighand from going away while we take it.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		/* Re-check under the lock: it may just have been dequeued. */
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	/* Clear PREALLOC so __sigqueue_free() releases the entry —
	 * presumably it skips preallocated ones; confirm its definition. */
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
1352
1353 int
1354 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1355 {
1356         unsigned long flags;
1357         int ret = 0;
1358
1359         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1360         read_lock(&tasklist_lock);
1361
1362         if (unlikely(p->flags & PF_EXITING)) {
1363                 ret = -1;
1364                 goto out_err;
1365         }
1366
1367         spin_lock_irqsave(&p->sighand->siglock, flags);
1368
1369         if (unlikely(!list_empty(&q->list))) {
1370                 /*
1371                  * If an SI_TIMER entry is already queue just increment
1372                  * the overrun count.
1373                  */
1374                 if (q->info.si_code != SI_TIMER)
1375                         BUG();
1376                 q->info.si_overrun++;
1377                 goto out;
1378         }
1379         /* Short-circuit ignored signals.  */
1380         if (sig_ignored(p, sig)) {
1381                 ret = 1;
1382                 goto out;
1383         }
1384
1385         list_add_tail(&q->list, &p->pending.list);
1386         sigaddset(&p->pending.signal, sig);
1387         if (!sigismember(&p->blocked, sig))
1388                 signal_wake_up(p, sig == SIGKILL);
1389
1390 out:
1391         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1392 out_err:
1393         read_unlock(&tasklist_lock);
1394
1395         return ret;
1396 }
1397
1398 int
1399 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1400 {
1401         unsigned long flags;
1402         int ret = 0;
1403
1404         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1405         read_lock(&tasklist_lock);
1406         spin_lock_irqsave(&p->sighand->siglock, flags);
1407         handle_stop_signal(sig, p);
1408
1409         /* Short-circuit ignored signals.  */
1410         if (sig_ignored(p, sig)) {
1411                 ret = 1;
1412                 goto out;
1413         }
1414
1415         if (unlikely(!list_empty(&q->list))) {
1416                 /*
1417                  * If an SI_TIMER entry is already queue just increment
1418                  * the overrun count.  Other uses should not try to
1419                  * send the signal multiple times.
1420                  */
1421                 if (q->info.si_code != SI_TIMER)
1422                         BUG();
1423                 q->info.si_overrun++;
1424                 goto out;
1425         } 
1426
1427         /*
1428          * Put this signal on the shared-pending queue.
1429          * We always use the shared queue for process-wide signals,
1430          * to avoid several races.
1431          */
1432         list_add_tail(&q->list, &p->signal->shared_pending.list);
1433         sigaddset(&p->signal->shared_pending.signal, sig);
1434
1435         __group_complete_signal(sig, p);
1436 out:
1437         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1438         read_unlock(&tasklist_lock);
1439         return(ret);
1440 }
1441
1442 /*
1443  * Wake up any threads in the parent blocked in wait* syscalls.
1444  */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	/* _sync variant hints the waker is about to block/exit —
	 * TODO confirm against wake_up_interruptible_sync() semantics. */
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
1450
1451 /*
1452  * Let a parent know about the death of a child.
1453  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1454  */
1455
/*
 * Notify @tsk's parent of its death with signal @sig (normally
 * SIGCHLD), filling in a CLD_* siginfo from tsk->exit_code.  May set
 * tsk->exit_signal = -1 to request auto-reaping when the parent
 * ignores SIGCHLD or uses SA_NOCLDWAIT.
 */
void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

 	/* do_notify_parent_cldstop should have been called instead.  */
 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	/* Only a dead thread-group leader (or a ptraced thread) may
	 * notify its parent. */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	/* Decode exit_code: 0x80 = dumped core, low 7 bits = killing
	 * signal, otherwise a normal exit with status in bits 8-15. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/* sig may have been cleared to 0 above: then only wake waiters. */
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
1520
/*
 * Notify the parent of a stop/continue/trap state change of @tsk.
 * @to_self: report on behalf of this thread to its (ptrace) parent;
 * otherwise report the group leader to the real parent.
 * @why: CLD_STOPPED, CLD_CONTINUED or CLD_TRAPPED.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (to_self)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

 	info.si_code = why;
 	switch (why) {
 	case CLD_CONTINUED:
 		info.si_status = SIGCONT;
 		break;
 	case CLD_STOPPED:
 		info.si_status = tsk->signal->group_exit_code & 0x7f;
 		break;
 	case CLD_TRAPPED:
 		info.si_status = tsk->exit_code & 0x7f;
 		break;
 	default:
 		BUG();
 	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	/* SA_NOCLDSTOP suppresses the SIGCHLD, but not the wakeup below. */
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1570
1571 /*
1572  * This must be called with current->sighand->siglock held.
1573  *
1574  * This should be the path for all ptrace stops.
1575  * We always set current->last_siginfo while stopped here.
1576  * That makes it a way to test a stopped process for
1577  * being ptrace-stopped vs being job-control-stopped.
1578  *
1579  * If we actually decide not to stop at all because the tracer is gone,
1580  * we leave nostop_code in current->exit_code.
1581  */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	/* Stop only if a live tracer will actually hear about it —
	 * NOTE(review): the self-attach/group-exit exclusions here are
	 * subtle; confirm against the PT_ATTACHED semantics. */
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
1630
1631 void ptrace_notify(int exit_code)
1632 {
1633         siginfo_t info;
1634
1635         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1636
1637         memset(&info, 0, sizeof info);
1638         info.si_signo = SIGTRAP;
1639         info.si_code = exit_code;
1640         info.si_pid = current->pid;
1641         info.si_uid = current->uid;
1642
1643         /* Let the debugger run.  */
1644         spin_lock_irq(&current->sighand->siglock);
1645         ptrace_stop(exit_code, 0, &info);
1646         spin_unlock_irq(&current->sighand->siglock);
1647 }
1648
1649 static void
1650 finish_stop(int stop_count)
1651 {
1652         int to_self;
1653
1654         /*
1655          * If there are no other threads in the group, or if there is
1656          * a group stop in progress and we are the last to stop,
1657          * report to the parent.  When ptraced, every thread reports itself.
1658          */
1659         if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1660                 to_self = 1;
1661         else if (stop_count == 0)
1662                 to_self = 0;
1663         else
1664                 goto out;
1665
1666         read_lock(&tasklist_lock);
1667         do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1668         read_unlock(&tasklist_lock);
1669
1670 out:
1671         schedule();
1672         /*
1673          * Now we don't run again until continued.
1674          */
1675         current->exit_code = 0;
1676 }
1677
1678 /*
1679  * This performs the stopping for SIGSTOP and other stop signals.
1680  * We have to stop all threads in the thread group.
1681  * Returns nonzero if we've actually stopped and released the siglock.
1682  * Returns zero if we didn't stop and still hold the siglock.
1683  */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	/* -1 means "sole thread, report to parent ourselves" in finish_stop(). */
	int stop_count = -1;

	/*
	 * Called with sighand->siglock held.  SIGNAL_STOP_DEQUEUED was set
	 * when the stop signal was dequeued; if it is gone, a SIGCONT (or
	 * death) raced in and this stop must be swallowed.
	 */
	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		/* Must go TASK_STOPPED before dropping the siglock. */
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			/* We are the initiator: count and kick every live,
			 * not-yet-stopped sibling so it stops too. */
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (!t->exit_state &&
				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	/* Siglock is dropped on every path that reaches here (return 1). */
	finish_stop(stop_count);
	return 1;
}
1782
1783 /*
1784  * Do appropriate magic when group_stop_count > 0.
1785  * We return nonzero if we stopped, after releasing the siglock.
1786  * We return zero if we still hold the siglock and should look
1787  * for another signal without checking group_stop_count again.
1788  */
/* Called with current->sighand->siglock held; releases it iff we stop. */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	/* exit_code and state must be set before dropping the siglock. */
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
1823
/*
 * Main signal-delivery decision loop, called from arch code on the way
 * back to user mode.  Returns the number of a signal whose user handler
 * should be run (with *info and *return_ka filled in), or 0 when nothing
 * is pending.  Stop signals and fatal default actions are fully handled
 * here and loop or exit instead of returning to the caller.
 */
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		/* Participate in any group stop first; it drops the siglock
		 * when it actually stops, hence the relock. */
		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig or group_exit? */
			signr = current->exit_code;
			if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			/* SA_ONESHOT/SA_RESETHAND: reset to default once used. */
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
1960
/* Symbols exported for use by loadable modules. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
1973
1974
1975 /*
1976  * System call entry points.
1977  */
1978
1979 asmlinkage long sys_restart_syscall(void)
1980 {
1981         struct restart_block *restart = &current_thread_info()->restart_block;
1982         return restart->fn(restart);
1983 }
1984
/*
 * Installed as restart_block.fn when an interrupted syscall must not be
 * restarted: simply report the interruption to the caller.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
1989
1990 /*
1991  * We don't need to get the kernel lock - this is all local to this
1992  * particular thread.. (and that's good, because this is _heavily_
1993  * used by various programs)
1994  */
1995
1996 /*
1997  * This is also useful for kernel threads that want to temporarily
1998  * (or permanently) block certain signals.
1999  *
2000  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2001  * interface happily blocks "unblockable" signals like SIGKILL
2002  * and friends.
2003  */
2004 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2005 {
2006         int error;
2007         sigset_t old_block;
2008
2009         spin_lock_irq(&current->sighand->siglock);
2010         old_block = current->blocked;
2011         error = 0;
2012         switch (how) {
2013         case SIG_BLOCK:
2014                 sigorsets(&current->blocked, &current->blocked, set);
2015                 break;
2016         case SIG_UNBLOCK:
2017                 signandsets(&current->blocked, &current->blocked, set);
2018                 break;
2019         case SIG_SETMASK:
2020                 current->blocked = *set;
2021                 break;
2022         default:
2023                 error = -EINVAL;
2024         }
2025         recalc_sigpending();
2026         spin_unlock_irq(&current->sighand->siglock);
2027         if (oldset)
2028                 *oldset = old_block;
2029         return error;
2030 }
2031
2032 asmlinkage long
2033 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2034 {
2035         int error = -EINVAL;
2036         sigset_t old_set, new_set;
2037
2038         /* XXX: Don't preclude handling different sized sigset_t's.  */
2039         if (sigsetsize != sizeof(sigset_t))
2040                 goto out;
2041
2042         if (set) {
2043                 error = -EFAULT;
2044                 if (copy_from_user(&new_set, set, sizeof(*set)))
2045                         goto out;
2046                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2047
2048                 error = sigprocmask(how, &new_set, &old_set);
2049                 if (error)
2050                         goto out;
2051                 if (oset)
2052                         goto set_old;
2053         } else if (oset) {
2054                 spin_lock_irq(&current->sighand->siglock);
2055                 old_set = current->blocked;
2056                 spin_unlock_irq(&current->sighand->siglock);
2057
2058         set_old:
2059                 error = -EFAULT;
2060                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2061                         goto out;
2062         }
2063         error = 0;
2064 out:
2065         return error;
2066 }
2067
2068 long do_sigpending(void __user *set, unsigned long sigsetsize)
2069 {
2070         long error = -EINVAL;
2071         sigset_t pending;
2072
2073         if (sigsetsize > sizeof(sigset_t))
2074                 goto out;
2075
2076         spin_lock_irq(&current->sighand->siglock);
2077         sigorsets(&pending, &current->pending.signal,
2078                   &current->signal->shared_pending.signal);
2079         spin_unlock_irq(&current->sighand->siglock);
2080
2081         /* Outside the lock because only this thread touches it.  */
2082         sigandsets(&pending, &current->blocked, &pending);
2083
2084         error = -EFAULT;
2085         if (!copy_to_user(set, &pending, sigsetsize))
2086                 error = 0;
2087
2088 out:
2089         return error;
2090 }       
2091
2092 asmlinkage long
2093 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2094 {
2095         return do_sigpending(set, sigsetsize);
2096 }
2097
2098 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2099
2100 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2101 {
2102         int err;
2103
2104         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2105                 return -EFAULT;
2106         if (from->si_code < 0)
2107                 return __copy_to_user(to, from, sizeof(siginfo_t))
2108                         ? -EFAULT : 0;
2109         /*
2110          * If you change siginfo_t structure, please be sure
2111          * this code is fixed accordingly.
2112          * It should never copy any pad contained in the structure
2113          * to avoid security leaks, but must copy the generic
2114          * 3 ints plus the relevant union member.
2115          */
2116         err = __put_user(from->si_signo, &to->si_signo);
2117         err |= __put_user(from->si_errno, &to->si_errno);
2118         err |= __put_user((short)from->si_code, &to->si_code);
2119         switch (from->si_code & __SI_MASK) {
2120         case __SI_KILL:
2121                 err |= __put_user(from->si_pid, &to->si_pid);
2122                 err |= __put_user(from->si_uid, &to->si_uid);
2123                 break;
2124         case __SI_TIMER:
2125                  err |= __put_user(from->si_tid, &to->si_tid);
2126                  err |= __put_user(from->si_overrun, &to->si_overrun);
2127                  err |= __put_user(from->si_ptr, &to->si_ptr);
2128                 break;
2129         case __SI_POLL:
2130                 err |= __put_user(from->si_band, &to->si_band);
2131                 err |= __put_user(from->si_fd, &to->si_fd);
2132                 break;
2133         case __SI_FAULT:
2134                 err |= __put_user(from->si_addr, &to->si_addr);
2135 #ifdef __ARCH_SI_TRAPNO
2136                 err |= __put_user(from->si_trapno, &to->si_trapno);
2137 #endif
2138                 break;
2139         case __SI_CHLD:
2140                 err |= __put_user(from->si_pid, &to->si_pid);
2141                 err |= __put_user(from->si_uid, &to->si_uid);
2142                 err |= __put_user(from->si_status, &to->si_status);
2143                 err |= __put_user(from->si_utime, &to->si_utime);
2144                 err |= __put_user(from->si_stime, &to->si_stime);
2145                 break;
2146         case __SI_RT: /* This is not generated by the kernel as of now. */
2147         case __SI_MESGQ: /* But this is */
2148                 err |= __put_user(from->si_pid, &to->si_pid);
2149                 err |= __put_user(from->si_uid, &to->si_uid);
2150                 err |= __put_user(from->si_ptr, &to->si_ptr);
2151                 break;
2152         default: /* this is just in case for now ... */
2153                 err |= __put_user(from->si_pid, &to->si_pid);
2154                 err |= __put_user(from->si_uid, &to->si_uid);
2155                 break;
2156         }
2157         return err;
2158 }
2159
2160 #endif
2161
/*
 * rt_sigtimedwait(2): synchronously wait for one of the signals in
 * *uthese, with an optional timeout.  Returns the signal number (filling
 * *uinfo when requested), -EAGAIN on timeout, -EINTR when interrupted by
 * an unblocked signal outside the set, or -EFAULT/-EINVAL on bad input.
 */
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			/* +1 jiffy for any nonzero timeout so we round up,
			 * never sleep short. */
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			try_to_freeze();
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			/* Restore the original mask saved above. */
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		/* timeout == 0 here means the full wait elapsed (-EAGAIN);
		 * nonzero means we were woken early by a signal (-EINTR). */
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
2239
2240 asmlinkage long
2241 sys_kill(int pid, int sig)
2242 {
2243         struct siginfo info;
2244
2245         info.si_signo = sig;
2246         info.si_errno = 0;
2247         info.si_code = SI_USER;
2248         info.si_pid = current->tgid;
2249         info.si_uid = current->uid;
2250
2251         return kill_something_info(sig, &info, pid);
2252 }
2253
2254 static int do_tkill(int tgid, int pid, int sig)
2255 {
2256         int error;
2257         struct siginfo info;
2258         struct task_struct *p;
2259
2260         error = -ESRCH;
2261         info.si_signo = sig;
2262         info.si_errno = 0;
2263         info.si_code = SI_TKILL;
2264         info.si_pid = current->tgid;
2265         info.si_uid = current->uid;
2266
2267         read_lock(&tasklist_lock);
2268         p = find_task_by_pid(pid);
2269         if (p && (tgid <= 0 || p->tgid == tgid)) {
2270                 error = check_kill_permission(sig, &info, p);
2271                 /*
2272                  * The null signal is a permissions and process existence
2273                  * probe.  No signal is actually delivered.
2274                  */
2275                 if (!error && sig && p->sighand) {
2276                         spin_lock_irq(&p->sighand->siglock);
2277                         handle_stop_signal(sig, p);
2278                         error = specific_send_sig_info(sig, &info, p);
2279                         spin_unlock_irq(&p->sighand->siglock);
2280                 }
2281         }
2282         read_unlock(&tasklist_lock);
2283
2284         return error;
2285 }
2286
2287 /**
2288  *  sys_tgkill - send signal to one specific thread
2289  *  @tgid: the thread group ID of the thread
2290  *  @pid: the PID of the thread
2291  *  @sig: signal to be sent
2292  *
2293  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2294  *  exists but it's not belonging to the target process anymore. This
2295  *  method solves the problem of threads exiting and PIDs getting reused.
2296  */
2297 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2298 {
2299         /* This is only valid for single tasks */
2300         if (pid <= 0 || tgid <= 0)
2301                 return -EINVAL;
2302
2303         return do_tkill(tgid, pid, sig);
2304 }
2305
2306 /*
2307  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2308  */
2309 asmlinkage long
2310 sys_tkill(int pid, int sig)
2311 {
2312         /* This is only valid for single tasks */
2313         if (pid <= 0)
2314                 return -EINVAL;
2315
2316         return do_tkill(0, pid, sig);
2317 }
2318
2319 asmlinkage long
2320 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2321 {
2322         siginfo_t info;
2323
2324         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2325                 return -EFAULT;
2326
2327         /* Not even root can pretend to send signals from the kernel.
2328            Nor can they impersonate a kill(), which adds source info.  */
2329         if (info.si_code >= 0)
2330                 return -EPERM;
2331         info.si_signo = sig;
2332
2333         /* POSIX.1b doesn't mention process groups.  */
2334         return kill_proc_info(sig, &info, pid);
2335 }
2336
/*
 * Common worker for sigaction(2)/rt_sigaction(2): install a new action
 * for sig (when act != NULL) and/or report the old one (when oact !=
 * NULL).  SIGKILL/SIGSTOP cannot have their action changed.  Returns 0,
 * -EINVAL, or -ERESTARTNOINTR to retry after pending signals are taken.
 */
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			/* SIGKILL and SIGSTOP can never be masked. */
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			/* Discard the now-ignored signal from the shared
			 * queue and from every thread's private queue. */
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		/* SIGKILL and SIGSTOP can never be masked. */
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
2407
2408 int 
2409 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2410 {
2411         stack_t oss;
2412         int error;
2413
2414         if (uoss) {
2415                 oss.ss_sp = (void __user *) current->sas_ss_sp;
2416                 oss.ss_size = current->sas_ss_size;
2417                 oss.ss_flags = sas_ss_flags(sp);
2418         }
2419
2420         if (uss) {
2421                 void __user *ss_sp;
2422                 size_t ss_size;
2423                 int ss_flags;
2424
2425                 error = -EFAULT;
2426                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2427                     || __get_user(ss_sp, &uss->ss_sp)
2428                     || __get_user(ss_flags, &uss->ss_flags)
2429                     || __get_user(ss_size, &uss->ss_size))
2430                         goto out;
2431
2432                 error = -EPERM;
2433                 if (on_sig_stack(sp))
2434                         goto out;
2435
2436                 error = -EINVAL;
2437                 /*
2438                  *
2439                  * Note - this code used to test ss_flags incorrectly
2440                  *        old code may have been written using ss_flags==0
2441                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2442                  *        way that worked) - this fix preserves that older
2443                  *        mechanism
2444                  */
2445                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2446                         goto out;
2447
2448                 if (ss_flags == SS_DISABLE) {
2449                         ss_size = 0;
2450                         ss_sp = NULL;
2451                 } else {
2452                         error = -ENOMEM;
2453                         if (ss_size < MINSIGSTKSZ)
2454                                 goto out;
2455                 }
2456
2457                 current->sas_ss_sp = (unsigned long) ss_sp;
2458                 current->sas_ss_size = ss_size;
2459         }
2460
2461         if (uoss) {
2462                 error = -EFAULT;
2463                 if (copy_to_user(uoss, &oss, sizeof(oss)))
2464                         goto out;
2465         }
2466
2467         error = 0;
2468 out:
2469         return error;
2470 }
2471
2472 #ifdef __ARCH_WANT_SYS_SIGPENDING
2473
/*
 * Legacy sigpending(2): report the pending signals in the old
 * single-word sigset format.  Shares its implementation with the
 * rt_ variant via do_sigpending(), differing only in the set size.
 */
asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}
2479
2480 #endif
2481
2482 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2483 /* Some platforms have their own version with special arguments others
2484    support only sys_rt_sigprocmask.  */
2485
2486 asmlinkage long
2487 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2488 {
2489         int error;
2490         old_sigset_t old_set, new_set;
2491
2492         if (set) {
2493                 error = -EFAULT;
2494                 if (copy_from_user(&new_set, set, sizeof(*set)))
2495                         goto out;
2496                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2497
2498                 spin_lock_irq(&current->sighand->siglock);
2499                 old_set = current->blocked.sig[0];
2500
2501                 error = 0;
2502                 switch (how) {
2503                 default:
2504                         error = -EINVAL;
2505                         break;
2506                 case SIG_BLOCK:
2507                         sigaddsetmask(&current->blocked, new_set);
2508                         break;
2509                 case SIG_UNBLOCK:
2510                         sigdelsetmask(&current->blocked, new_set);
2511                         break;
2512                 case SIG_SETMASK:
2513                         current->blocked.sig[0] = new_set;
2514                         break;
2515                 }
2516
2517                 recalc_sigpending();
2518                 spin_unlock_irq(&current->sighand->siglock);
2519                 if (error)
2520                         goto out;
2521                 if (oset)
2522                         goto set_old;
2523         } else if (oset) {
2524                 old_set = current->blocked.sig[0];
2525         set_old:
2526                 error = -EFAULT;
2527                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2528                         goto out;
2529         }
2530         error = 0;
2531 out:
2532         return error;
2533 }
2534 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2535
2536 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2537 asmlinkage long
2538 sys_rt_sigaction(int sig,
2539                  const struct sigaction __user *act,
2540                  struct sigaction __user *oact,
2541                  size_t sigsetsize)
2542 {
2543         struct k_sigaction new_sa, old_sa;
2544         int ret = -EINVAL;
2545
2546         /* XXX: Don't preclude handling different sized sigset_t's.  */
2547         if (sigsetsize != sizeof(sigset_t))
2548                 goto out;
2549
2550         if (act) {
2551                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2552                         return -EFAULT;
2553         }
2554
2555         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2556
2557         if (!ret && oact) {
2558                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2559                         return -EFAULT;
2560         }
2561 out:
2562         return ret;
2563 }
2564 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2565
2566 #ifdef __ARCH_WANT_SYS_SGETMASK
2567
2568 /*
2569  * For backwards compatibility.  Functionality superseded by sigprocmask.
2570  */
/*
 * Legacy sgetmask(2): return the low word of the caller's blocked
 * signal mask.  For backwards compatibility only - functionality
 * superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe: a single word read of our own blocked set. */
	return current->blocked.sig[0];
}
2577
2578 asmlinkage long
2579 sys_ssetmask(int newmask)
2580 {
2581         int old;
2582
2583         spin_lock_irq(&current->sighand->siglock);
2584         old = current->blocked.sig[0];
2585
2586         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2587                                                   sigmask(SIGSTOP)));
2588         recalc_sigpending();
2589         spin_unlock_irq(&current->sighand->siglock);
2590
2591         return old;
2592 }
#endif /* __ARCH_WANT_SYS_SGETMASK */
2594
2595 #ifdef __ARCH_WANT_SYS_SIGNAL
2596 /*
2597  * For backwards compatibility.  Functionality superseded by sigaction.
2598  */
2599 asmlinkage unsigned long
2600 sys_signal(int sig, __sighandler_t handler)
2601 {
2602         struct k_sigaction new_sa, old_sa;
2603         int ret;
2604
2605         new_sa.sa.sa_handler = handler;
2606         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2607
2608         ret = do_sigaction(sig, &new_sa, &old_sa);
2609
2610         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2611 }
2612 #endif /* __ARCH_WANT_SYS_SIGNAL */
2613
2614 #ifdef __ARCH_WANT_SYS_PAUSE
2615
/*
 * pause(2): sleep until a signal is delivered.  Marking ourselves
 * TASK_INTERRUPTIBLE and calling schedule() parks the task until a
 * signal wakes it; -ERESTARTNOHAND makes the syscall return -EINTR
 * to userspace after the handler runs, rather than restarting.
 */
asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
2623
2624 #endif
2625
/*
 * Boot-time setup: create the slab cache backing struct sigqueue,
 * used for (pre)allocated queued-signal entries.  SLAB_PANIC means
 * the kernel panics at boot if the cache cannot be created - signal
 * delivery cannot work without it.
 */
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}