[PATCH] taskstats: cleanup do_exit() path
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

static void exit_mm(struct task_struct * tsk);

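/*
 * Detach the task from the pid hashes and unlink it from its parent
 * and thread-group lists.  Called from __exit_signal() with
 * tasklist_lock write-locked.
 */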
static void __unhash_process(struct task_struct *p)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (thread_group_leader(p)) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                __get_cpu_var(process_counts)--;
        }
        list_del_rcu(&p->thread_group);
        remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *sighand;

        BUG_ON(!sig);
        BUG_ON(!atomic_read(&sig->count));

        rcu_read_lock();
        sighand = rcu_dereference(tsk->sighand);
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (atomic_dec_and_test(&sig->count))
                posix_cpu_timers_exit_group(tsk);
        else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
                        wake_up_process(sig->group_exit_task);
                        sig->group_exit_task = NULL;
                }
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime = cputime_add(sig->utime, tsk->utime);
                sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->sched_time += tsk->sched_time;
                sig = NULL; /* Marker for below. */
        }

        __unhash_process(tsk);

        tsk->signal = NULL;
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);
        rcu_read_unlock();

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        flush_sigqueue(&tsk->pending);
        if (sig) {
                flush_sigqueue(&sig->shared_pending);
                taskstats_tgid_free(sig);
                __cleanup_signal(sig);
        }
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
        put_task_struct(container_of(rhp, struct task_struct, rcu));
}

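/*
 * Final release of a dead task: drop its ptrace links, detach it from
 * the pid hashes via __exit_signal(), possibly notify a zombie group
 * leader's parent, and hand the task_struct to RCU for the final put.
 */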
void release_task(struct task_struct * p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        atomic_dec(&p->user->processes);
        write_lock_irq(&tasklist_lock);
        ptrace_unlink(p);
        BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is a zombie, then notify the
         * group leader's parent process (if it wants notification).
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
                BUG_ON(leader->exit_signal == -1);
                do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 *
                 * do_notify_parent() will have marked it self-reaping in
                 * that case.
                 */
                zap_leader = (leader->exit_signal == -1);
        }

        sched_exit(p);
        write_unlock_irq(&tasklist_lock);
        proc_flush_task(p);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
        struct task_struct *p;
        int sid = -1;

        read_lock(&tasklist_lock);
        do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->session > 0) {
                        sid = p->signal->session;
                        goto out;
                }
        } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
        p = find_task_by_pid(pgrp);
        if (p)
                sid = p->signal->session;
out:
        read_unlock(&tasklist_lock);

        return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
        struct task_struct *p;
        int ret = 1;

        do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
                if (p == ignored_task
                                || p->exit_state
                                || is_init(p->real_parent))
                        continue;
                if (process_group(p->real_parent) != pgrp
                            && p->real_parent->signal->session == p->signal->session) {
                        ret = 0;
                        break;
                }
        } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
        return ret;     /* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(pgrp, NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

static int has_stopped_jobs(int pgrp)
{
        int retval = 0;
        struct task_struct *p;

        do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
                if (p->state != TASK_STOPPED)
                        continue;
                retval = 1;
                break;
        } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
        return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
        write_lock_irq(&tasklist_lock);

        ptrace_unlink(current);
        /* Reparent to init */
        remove_parent(current);
        current->parent = child_reaper;
        current->real_parent = child_reaper;
        add_parent(current);

        /* Set the exit signal to SIGCHLD so we signal init on exit */
        current->exit_signal = SIGCHLD;

        if (!has_rt_policy(current) && (task_nice(current) < 0))
                set_user_nice(current, 0);
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
        security_task_reparent_to_init(current);
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));
        atomic_inc(&(INIT_USER->__count));
        write_unlock_irq(&tasklist_lock);
        switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
        struct task_struct *curr = current->group_leader;

        if (curr->signal->session != session) {
                detach_pid(curr, PIDTYPE_SID);
                curr->signal->session = session;
                attach_pid(curr, PIDTYPE_SID, session);
        }
        if (process_group(curr) != pgrp) {
                detach_pid(curr, PIDTYPE_PGID);
                curr->signal->pgrp = pgrp;
                attach_pid(curr, PIDTYPE_PGID, pgrp);
        }
}

void set_special_pids(pid_t session, pid_t pgrp)
{
        write_lock_irq(&tasklist_lock);
        __set_special_pids(session, pgrp);
        write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        sigdelset(&current->blocked, sig);
        if (!current->mm) {
                /* Kernel threads handle their own signals.
                   Let the signal code know it'll be handled, so
                   that they don't get converted to SIGKILL or
                   just silently dropped */
                current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        sigaddset(&current->blocked, sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *      Put all the gunge required to become a kernel thread without
 *      attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
        va_list args;
        struct fs_struct *fs;
        sigset_t blocked;

        va_start(args, name);
        vsnprintf(current->comm, sizeof(current->comm), name, args);
        va_end(args);

        /*
         * If we were started as a result of loading a module, close all of
         * the user space pages.  We don't need them, and if we didn't close
         * them they would be locked into memory.
         */
        exit_mm(current);

        set_special_pids(1, 1);
        mutex_lock(&tty_mutex);
        current->signal->tty = NULL;
        mutex_unlock(&tty_mutex);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* Become as one with the init task */

        exit_fs(current);       /* current->fs->count--; */
        fs = init_task.fs;
        current->fs = fs;
        atomic_inc(&fs->count);

        exit_task_namespaces(current);
        current->nsproxy = init_task.nsproxy;
        get_task_namespaces(current);

        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);

        reparent_to_init();
}

EXPORT_SYMBOL(daemonize);

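/*
 * Illustrative sketch (not part of this file): a kernel thread of this
 * era would typically call daemonize() first and then re-enable the
 * signals it cares about, e.g.
 *
 *      static int my_kthread(void *unused)
 *      {
 *              daemonize("my_kthread");
 *              allow_signal(SIGTERM);
 *              while (!signal_pending(current))
 *                      schedule_timeout_interruptible(HZ);
 *              return 0;
 *      }
 */
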
static void close_files(struct files_struct * files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.
         */
        fdt = files_fdtable(files);
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
                if (i >= fdt->max_fdset || i >= fdt->max_fds)
                        break;
                set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file)
                                        filp_close(file, files);
                        }
                        i++;
                        set >>= 1;
                }
        }
}

struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                fdt = files_fdtable(files);
                if (fdt == &files->fdtab)
                        fdt->free_files = files;
                else
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
        }
}

EXPORT_SYMBOL(put_files_struct);
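
/*
 * Illustrative sketch (not part of this file): callers pair
 * get_files_struct() with put_files_struct(), e.g.
 *
 *      struct files_struct *files = get_files_struct(task);
 *      if (files) {
 *              ... inspect the fd table ...
 *              put_files_struct(files);
 *      }
 */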

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

void exit_files(struct task_struct *tsk)
{
        __exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
        /* No need to hold fs->lock if we are killing it */
        if (atomic_dec_and_test(&fs->count)) {
                dput(fs->root);
                mntput(fs->rootmnt);
                dput(fs->pwd);
                mntput(fs->pwdmnt);
                if (fs->altroot) {
                        dput(fs->altroot);
                        mntput(fs->altrootmnt);
                }
                kmem_cache_free(fs_cachep, fs);
        }
}

void put_fs_struct(struct fs_struct *fs)
{
        __put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
        struct fs_struct * fs = tsk->fs;

        if (fs) {
                task_lock(tsk);
                tsk->fs = NULL;
                task_unlock(tsk);
                __put_fs_struct(fs);
        }
}

void exit_fs(struct task_struct *tsk)
{
        __exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
        struct mm_struct *mm = tsk->mm;

        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_waiters
         * and clearing tsk->mm.  The core-inducing thread
         * will increment core_waiters for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        if (mm->core_waiters) {
                up_read(&mm->mmap_sem);
                down_write(&mm->mmap_sem);
                if (!--mm->core_waiters)
                        complete(mm->core_startup_done);
                up_write(&mm->mmap_sem);

                wait_for_completion(&mm->core_done);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        task_unlock(tsk);
        mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
        /*
         * Make sure we're not reparenting to ourselves and that
         * the parent is not a zombie.
         */
        BUG_ON(p == reaper || reaper->exit_state);
        p->real_parent = reaper;
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
        /* We don't want people slaying init.  */
        if (p->exit_signal != -1)
                p->exit_signal = SIGCHLD;

        if (p->pdeath_signal)
                /* We already hold the tasklist_lock here.  */
                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

        /* Move the child from its dying parent to the new one.  */
        if (unlikely(traced)) {
                /* Preserve ptrace links if someone else is tracing this child.  */
                list_del_init(&p->ptrace_list);
                if (p->parent != p->real_parent)
                        list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
        } else {
                /* If this child is being traced, then we're the one tracing it
                 * anyway, so let go of it.
                 */
                p->ptrace = 0;
                remove_parent(p);
                p->parent = p->real_parent;
                add_parent(p);

                /* If we'd notified the old parent about this child's death,
                 * also notify the new parent.
                 */
                if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
                    thread_group_empty(p))
                        do_notify_parent(p, p->exit_signal);
                else if (p->state == TASK_TRACED) {
                        /*
                         * If it was at a trace stop, turn it into
                         * a normal stop since it's no longer being
                         * traced.
                         */
                        ptrace_untrace(p);
                }
        }

        /*
         * process group orphan check
         * Case ii: Our child is in a different pgrp
         * than we are, and it was the only connection
         * outside, so the child pgrp is now orphaned.
         */
        if ((process_group(p) != process_group(father)) &&
            (p->signal->session == father->signal->session)) {
                int pgrp = process_group(p);

                if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
                        __kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                        __kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
                }
        }
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give them to
 * the global child reaper process (ie "init")
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
        struct task_struct *p, *reaper = father;
        struct list_head *_p, *_n;

        do {
                reaper = next_thread(reaper);
                if (reaper == father) {
                        reaper = child_reaper;
                        break;
                }
        } while (reaper->exit_state);

        /*
         * There are only two places where our children can be:
         *
         * - in our child list
         * - in our ptraced child list
         *
         * Search them and reparent children.
         */
        list_for_each_safe(_p, _n, &father->children) {
                int ptrace;
                p = list_entry(_p, struct task_struct, sibling);

                ptrace = p->ptrace;

                /* if father isn't the real parent, then ptrace must be enabled */
                BUG_ON(father != p->real_parent && !ptrace);

                if (father == p->real_parent) {
                        /* we are the real parent, so reparent to the reaper */
                        choose_new_parent(p, reaper);
                        reparent_thread(p, father, 0);
                } else {
                        /* reparent ptraced task to its real parent */
                        __ptrace_unlink (p);
                        if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
                            thread_group_empty(p))
                                do_notify_parent(p, p->exit_signal);
                }

                /*
                 * If the ptraced child is a zombie with exit_signal == -1
                 * we must collect it before we exit, or it will remain a
                 * zombie forever: we prevented it from self-reaping while
                 * it was being traced by us, so that we could see it in wait4.
                 */
                if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
                        list_add(&p->ptrace_list, to_release);
        }
        list_for_each_safe(_p, _n, &father->ptrace_children) {
                p = list_entry(_p, struct task_struct, ptrace_list);
                choose_new_parent(p, reaper);
                reparent_thread(p, father, 1);
        }
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
        int state;
        struct task_struct *t;
        struct list_head ptrace_dead, *_p, *_n;

        if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
            && !thread_group_empty(tsk)) {
                /*
                 * This occurs when there was a race between our exit
                 * syscall and a group signal choosing us as the one to
                 * wake up.  It could be that we are the only thread
                 * alerted to check for pending signals, but another thread
                 * should be woken now to take the signal since we will not.
                 * Now we'll wake all the threads in the group just to make
                 * sure someone gets all the pending signals.
                 */
                read_lock(&tasklist_lock);
                spin_lock_irq(&tsk->sighand->siglock);
                for (t = next_thread(tsk); t != tsk; t = next_thread(t))
                        if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
                                recalc_sigpending_tsk(t);
                                if (signal_pending(t))
                                        signal_wake_up(t, 0);
                        }
                spin_unlock_irq(&tsk->sighand->siglock);
                read_unlock(&tasklist_lock);
        }

        write_lock_irq(&tasklist_lock);

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *      as a result of our exiting, and if they have any stopped
         *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */

        INIT_LIST_HEAD(&ptrace_dead);
        forget_original_parent(tsk, &ptrace_dead);
        BUG_ON(!list_empty(&tsk->children));
        BUG_ON(!list_empty(&tsk->ptrace_children));

        /*
         * Check to see if any process groups have become orphaned
         * as a result of our exiting, and if they have any stopped
         * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         *
         * Case i: Our father is in a different pgrp than we are
         * and we were the only connection outside, so our pgrp
         * is about to become orphaned.
         */

        t = tsk->real_parent;

        if ((process_group(t) != process_group(tsk)) &&
            (t->signal->session == tsk->signal->session) &&
            will_become_orphaned_pgrp(process_group(tsk), tsk) &&
            has_stopped_jobs(process_group(tsk))) {
                __kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
                __kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
        }

        /* Let father know we died
         *
         * Thread signals are configurable, but you aren't going to use
         * that to send signals to arbitrary processes.
         * That stops right now.
         *
         * If the parent exec id doesn't match the exec id we saved
         * when we started then we know the parent has changed security
         * domain.
         *
         * If our self_exec_id doesn't match our parent_exec_id then
         * we have changed execution domain as these two values started
         * the same after a fork.
         */

        if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
            (tsk->parent_exec_id != t->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id) &&
            !capable(CAP_KILL))
                tsk->exit_signal = SIGCHLD;


        /* If something other than our normal parent is ptracing us, then
         * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
         * only has special meaning to our real parent.
         */
        if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
                int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
                do_notify_parent(tsk, signal);
        } else if (tsk->ptrace) {
                do_notify_parent(tsk, SIGCHLD);
        }

        state = EXIT_ZOMBIE;
        if (tsk->exit_signal == -1 &&
            (likely(tsk->ptrace == 0) ||
             unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
                state = EXIT_DEAD;
        tsk->exit_state = state;

        write_unlock_irq(&tasklist_lock);

        list_for_each_safe(_p, _n, &ptrace_dead) {
                list_del_init(_p);
                t = list_entry(_p, struct task_struct, ptrace_list);
                release_task(t);
        }

        /* If the process is dead, release it - nobody will wait for it */
        if (state == EXIT_DEAD)
                release_task(tsk);
}

fastcall NORET_TYPE void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(atomic_read(&tsk->fs_excl));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");
        if (unlikely(tsk == child_reaper))
                panic("Attempted to kill init!");

        if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
                current->ptrace_message = code;
                ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
        }

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                        "Fixing recursive fault but reboot is needed!\n");
                if (tsk->io_context)
                        exit_io_context();
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        tsk->flags |= PF_EXITING;

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                                current->comm, current->pid,
                                preempt_count());

        acct_update_integrals(tsk);
        if (tsk->mm) {
                update_hiwater_rss(tsk->mm);
                update_hiwater_vm(tsk->mm);
        }
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
        }
        acct_collect(code, group_dead);
        if (unlikely(tsk->robust_list))
                exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
        if (unlikely(tsk->compat_robust_list))
                compat_exit_robust_list(tsk);
#endif
        if (unlikely(tsk->audit_context))
                audit_free(tsk);

        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        exit_sem(tsk);
        __exit_files(tsk);
        __exit_fs(tsk);
        exit_thread();
        cpuset_exit(tsk);
        exit_keys(tsk);

        if (group_dead && tsk->signal->leader)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);
        if (tsk->binfmt)
                module_put(tsk->binfmt->module);

        tsk->exit_code = code;
        proc_exit_connector(tsk);
        exit_notify(tsk);
        exit_task_namespaces(tsk);
#ifdef CONFIG_NUMA
        mpol_free(tsk->mempolicy);
        tsk->mempolicy = NULL;
#endif
        /*
         * This must happen late, after the PID is not
         * hashed anymore:
         */
        if (unlikely(!list_empty(&tsk->pi_state_list)))
                exit_pi_state_list(tsk);
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held(tsk);

        if (tsk->io_context)
                exit_io_context();

        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);

        preempt_disable();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;

        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

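/*
 * The low 8 bits of error_code become bits 8-15 of the wait(2) status
 * word, i.e. the value the parent reads back via WEXITSTATUS().
 */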
asmlinkage long sys_exit(int error_code)
{
        do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (current->signal->flags & SIGNAL_GROUP_EXIT)
                exit_code = current->signal->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct signal_struct *const sig = current->signal;
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (sig->flags & SIGNAL_GROUP_EXIT)
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
        do_group_exit((error_code & 0xff) << 8);
}

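/*
 * eligible_child() mirrors the wait4() pid conventions: pid > 0 waits
 * for that specific child, pid == 0 for any child in our process group,
 * pid < -1 for any child in process group -pid, and pid == -1 for any
 * child at all.
 */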
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
        if (pid > 0) {
                if (p->pid != pid)
                        return 0;
        } else if (!pid) {
                if (process_group(p) != process_group(current))
                        return 0;
        } else if (pid != -1) {
                if (process_group(p) != -pid)
                        return 0;
        }

        /*
         * Do not consider detached threads that are
         * not ptraced:
         */
        if (p->exit_signal == -1 && !p->ptrace)
                return 0;

        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
            && !(options & __WALL))
                return 0;
        /*
         * Do not consider thread group leaders that are
         * in a non-empty thread group:
         */
        if (delay_group_leader(p))
                return 2;

        if (security_task_wait(p))
                return 0;

        return 1;
}

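/*
 * Fill in the siginfo and rusage for a WNOWAIT-style wait that leaves
 * the child in place.  Returns the child's pid on success or a
 * put_user() error; the task_struct reference the caller took is
 * dropped here.
 */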
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
                               int why, int status,
                               struct siginfo __user *infop,
                               struct rusage __user *rusagep)
{
        int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

        put_task_struct(p);
        if (!retval)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval)
                retval = put_user(0, &infop->si_errno);
        if (!retval)
                retval = put_user((short)why, &infop->si_code);
        if (!retval)
                retval = put_user(pid, &infop->si_pid);
        if (!retval)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = put_user(status, &infop->si_status);
        if (!retval)
                retval = pid;
        return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
                            struct siginfo __user *infop,
                            int __user *stat_addr, struct rusage __user *ru)
{
        unsigned long state;
        int retval;
        int status;

        if (unlikely(noreap)) {
                pid_t pid = p->pid;
                uid_t uid = p->uid;
                int exit_code = p->exit_code;
                int why, status;

                if (unlikely(p->exit_state != EXIT_ZOMBIE))
                        return 0;
                if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
                        return 0;
                get_task_struct(p);
                read_unlock(&tasklist_lock);
                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(p, pid, uid, why,
                                           status, infop, ru);
        }

        /*
         * Try to move the task's state to EXIT_DEAD;
         * only one thread is allowed to do this:
         */
        state = xchg(&p->exit_state, EXIT_DEAD);
        if (state != EXIT_ZOMBIE) {
                BUG_ON(state != EXIT_DEAD);
                return 0;
        }
        if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
                /*
                 * This can only happen in a race with a ptraced thread
                 * dying on another processor.
                 */
                return 0;
        }

        if (likely(p->real_parent == p->parent) && likely(p->signal)) {
                struct signal_struct *psig;
                struct signal_struct *sig;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields, because they are only touched by
                 * __exit_signal, which runs with tasklist_lock
                 * write-locked anyway, and so is excluded here.  We do
                 * need to protect the access to p->parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
                 */
                spin_lock_irq(&p->parent->sighand->siglock);
                psig = p->parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
                        cputime_add(p->utime,
                        cputime_add(sig->utime,
                                    sig->cutime)));
                psig->cstime =
                        cputime_add(psig->cstime,
                        cputime_add(p->stime,
                        cputime_add(sig->stime,
                                    sig->cstime)));
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                spin_unlock_irq(&p->parent->sighand->siglock);
        }

        /*
         * Now we are sure this task is interesting, and no other
         * thread can reap it because we set its state to EXIT_DEAD.
         */
        read_unlock(&tasklist_lock);

        retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && stat_addr)
                retval = put_user(status, stat_addr);
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(p->pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(p->uid, &infop->si_uid);
        if (retval) {
                // TODO: is this safe?
                p->exit_state = EXIT_ZOMBIE;
                return retval;
        }
        retval = p->pid;
        if (p->real_parent != p->parent) {
                write_lock_irq(&tasklist_lock);
                /* Double-check with lock held.  */
                if (p->real_parent != p->parent) {
                        __ptrace_unlink(p);
                        // TODO: is this safe?
                        p->exit_state = EXIT_ZOMBIE;
                        /*
                         * If this is not a detached task, notify the parent.
                         * If it's still not detached after that, don't release
                         * it now.
                         */
                        if (p->exit_signal != -1) {
                                do_notify_parent(p, p->exit_signal);
                                if (p->exit_signal != -1)
                                        p = NULL;
                        }
                }
                write_unlock_irq(&tasklist_lock);
        }
        if (p != NULL)
                release_task(p);
        BUG_ON(!retval);
        return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
                             int noreap, struct siginfo __user *infop,
                             int __user *stat_addr, struct rusage __user *ru)
{
        int retval, exit_code;

        if (!p->exit_code)
                return 0;
        if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
            p->signal && p->signal->group_stop_count > 0)
                /*
                 * A group stop is in progress and this is the group leader.
                 * We won't report until all threads have stopped.
                 */
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (unlikely(noreap)) {
                pid_t pid = p->pid;
                uid_t uid = p->uid;
                int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

                exit_code = p->exit_code;
                if (unlikely(!exit_code) ||
                    unlikely(p->state & TASK_TRACED))
                        goto bail_ref;
                return wait_noreap_copyout(p, pid, uid,
                                           why, (exit_code << 8) | 0x7f,
                                           infop, ru);
        }

        write_lock_irq(&tasklist_lock);

        /*
         * This uses xchg to be atomic with the thread resuming and setting
         * it.  It must also be done with the write lock held to prevent a
         * race with the EXIT_ZOMBIE case.
         */
        exit_code = xchg(&p->exit_code, 0);
        if (unlikely(p->exit_state)) {
                /*
                 * The task resumed and then died.  Let the next iteration
                 * catch it in EXIT_ZOMBIE.  Note that exit_code might
                 * already be zero here if it resumed and did _exit(0).
                 * The task itself is dead and won't touch exit_code again;
                 * other processors in this function are locked out.
                 */
                p->exit_code = exit_code;
                exit_code = 0;
        }
        if (unlikely(exit_code == 0)) {
                /*
                 * Another thread in this function got to it first, or it
                 * resumed, or it resumed and then died.
                 */
                write_unlock_irq(&tasklist_lock);
bail_ref:
                put_task_struct(p);
                /*
                 * We are returning to the wait loop without having successfully
                 * removed the process and having released the lock. We cannot
                 * continue, since the "p" task pointer is potentially stale.
                 *
                 * Return -EAGAIN, and do_wait() will restart the loop from the
                 * beginning. Do _not_ re-acquire the lock.
                 */
                return -EAGAIN;
        }

        /* move to end of parent's list to avoid starvation */
        remove_parent(p);
        add_parent(p);

        write_unlock_irq(&tasklist_lock);

        retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
        if (!retval && stat_addr)
                retval = put_user((exit_code << 8) | 0x7f, stat_addr);
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)((p->ptrace & PT_PTRACED)
                                          ? CLD_TRAPPED : CLD_STOPPED),
                                  &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(p->pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(p->uid, &infop->si_uid);
        if (!retval)
                retval = p->pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
                               struct siginfo __user *infop,
                               int __user *stat_addr, struct rusage __user *ru)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (unlikely(!p->signal))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!noreap)
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        spin_unlock_irq(&p->sighand->siglock);

        pid = p->pid;
        uid = p->uid;
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!infop) {
                retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
                put_task_struct(p);
                if (!retval && stat_addr)
                        retval = put_user(0xffff, stat_addr);
                if (!retval)
                        retval = p->pid;
        } else {
                retval = wait_noreap_copyout(p, pid, uid,
                                             CLD_CONTINUED, SIGCONT,
                                             infop, ru);
                BUG_ON(retval == 0);
        }

        return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
        if (!(p->ptrace & PT_PTRACED))
                return 0;
        if (!(p->ptrace & PT_ATTACHED))
                return 1;
        /*
         * This child was PTRACE_ATTACH'd.  We should be seeing it only if
         * we are the attacher.  If we are the real parent, this is a race
         * inside ptrace_attach.  The attacher is waiting for the
         * tasklist_lock, under which it will switch the parent links, but
         * it has already set the flags in p->ptrace.
         */
        return (p->parent != p->real_parent);
}
1437 static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
1438                     int __user *stat_addr, struct rusage __user *ru)
1439 {
1440         DECLARE_WAITQUEUE(wait, current);
1441         struct task_struct *tsk;
1442         int flag, retval;
1443
1444         add_wait_queue(&current->signal->wait_chldexit,&wait);
1445 repeat:
1446         /*
1447          * We will set this flag if we see any child that might later
1448          * match our criteria, even if we are not able to reap it yet.
1449          */
1450         flag = 0;
1451         current->state = TASK_INTERRUPTIBLE;
1452         read_lock(&tasklist_lock);
1453         tsk = current;
1454         do {
1455                 struct task_struct *p;
1456                 struct list_head *_p;
1457                 int ret;
1458
1459                 list_for_each(_p,&tsk->children) {
1460                         p = list_entry(_p, struct task_struct, sibling);
1461
1462                         ret = eligible_child(pid, options, p);
1463                         if (!ret)
1464                                 continue;
1465
1466                         switch (p->state) {
1467                         case TASK_TRACED:
1468                                 /*
1469                                  * When we hit the race with PTRACE_ATTACH,
1470                                  * we will not report this child.  But the
1471                                  * race means it has not yet been moved to
1472                                  * our ptrace_children list, so we need to
1473                                  * set the flag here to avoid a spurious ECHILD
1474                                  * when the race happens with the only child.
1475                                  */
1476                                 flag = 1;
1477                                 if (!my_ptrace_child(p))
1478                                         continue;
1479                                 /*FALLTHROUGH*/
1480                         case TASK_STOPPED:
1481                                 /*
1482                                  * It's stopped now, so it might later
1483                                  * continue, exit, or stop again.
1484                                  */
1485                                 flag = 1;
1486                                 if (!(options & WUNTRACED) &&
1487                                     !my_ptrace_child(p))
1488                                         continue;
1489                                 retval = wait_task_stopped(p, ret == 2,
1490                                                            (options & WNOWAIT),
1491                                                            infop,
1492                                                            stat_addr, ru);
1493                                 if (retval == -EAGAIN)
1494                                         goto repeat;
1495                                 if (retval != 0) /* wait_task_stopped() dropped the lock.  */
1496                                         goto end;
1497                                 break;
1498                         default:
1499                         /* case EXIT_DEAD: */
1500                                 if (p->exit_state == EXIT_DEAD)
1501                                         continue;
1502                         /* case EXIT_ZOMBIE: */
1503                                 if (p->exit_state == EXIT_ZOMBIE) {
1504                                         /*
1505                                          * Eligible, but a group leader with
1506                                          * live threads cannot be reaped yet:
1507                                          */
1508                                         if (ret == 2)
1509                                                 goto check_continued;
1510                                         if (unlikely(!(options & WEXITED)))
1511                                                 continue;
1512                                         retval = wait_task_zombie(
1513                                                 p, (options & WNOWAIT),
1514                                                 infop, stat_addr, ru);
1515                                         /* wait_task_zombie() dropped the lock.  */
1516                                         if (retval != 0)
1517                                                 goto end;
1518                                         break;
1519                                 }
1520 check_continued:
1521                                 /*
1522                                  * It's running now, so it might later
1523                                  * exit, stop, or stop and then continue.
1524                                  */
1525                                 flag = 1;
1526                                 if (likely(!(options & WCONTINUED)))
1527                                         continue;
1528                                 retval = wait_task_continued(
1529                                         p, (options & WNOWAIT),
1530                                         infop, stat_addr, ru);
1531                                 if (retval != 0) /* wait_task_continued() dropped the lock.  */
1532                                         goto end;
1533                                 break;
1534                         }
1535                 }
1536                 if (!flag) {
1537                         list_for_each(_p, &tsk->ptrace_children) {
1538                                 p = list_entry(_p, struct task_struct,
1539                                                 ptrace_list);
1540                                 if (!eligible_child(pid, options, p))
1541                                         continue;
1542                                 flag = 1;
1543                                 break;
1544                         }
1545                 }
1546                 if (options & __WNOTHREAD)
1547                         break;
1548                 tsk = next_thread(tsk);
1549                 BUG_ON(tsk->signal != current->signal);
1550         } while (tsk != current);
1551
1552         read_unlock(&tasklist_lock);
1553         if (flag) {
1554                 retval = 0;
1555                 if (options & WNOHANG)
1556                         goto end;
1557                 retval = -ERESTARTSYS;
1558                 if (signal_pending(current))
1559                         goto end;
1560                 schedule();
1561                 goto repeat;
1562         }
1563         retval = -ECHILD;
1564 end:
1565         current->state = TASK_RUNNING;
1566         remove_wait_queue(&current->signal->wait_chldexit, &wait);
1567         if (infop) {
1568                 if (retval > 0)
1569                         retval = 0;
1570                 else {
1571                         /*
1572                          * For a WNOHANG return, clear out all the fields
1573                          * we would set so the user can easily tell the
1574                          * difference.
1575                          */
1576                         if (!retval)
1577                                 retval = put_user(0, &infop->si_signo);
1578                         if (!retval)
1579                                 retval = put_user(0, &infop->si_errno);
1580                         if (!retval)
1581                                 retval = put_user(0, &infop->si_code);
1582                         if (!retval)
1583                                 retval = put_user(0, &infop->si_pid);
1584                         if (!retval)
1585                                 retval = put_user(0, &infop->si_uid);
1586                         if (!retval)
1587                                 retval = put_user(0, &infop->si_status);
1588                 }
1589         }
1590         return retval;
1591 }
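
/*
 * Editor's illustrative sketch (not part of the original file): the
 * field-clearing above is what lets a waitid(..., WNOHANG) caller tell
 * "nothing to report yet" apart from a real event, since both return 0:
 *
 *	#include <sys/wait.h>
 *
 *	siginfo_t info;
 *	if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0) {
 *		if (info.si_pid == 0)
 *			;	// live children, but no state change yet
 *		else
 *			;	// info describes a reaped child
 *	}
 */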
1592
1593 asmlinkage long sys_waitid(int which, pid_t pid,
1594                            struct siginfo __user *infop, int options,
1595                            struct rusage __user *ru)
1596 {
1597         long ret;
1598
1599         if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
1600                 return -EINVAL;
1601         if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1602                 return -EINVAL;
1603
1604         switch (which) {
1605         case P_ALL:
1606                 pid = -1;
1607                 break;
1608         case P_PID:
1609                 if (pid <= 0)
1610                         return -EINVAL;
1611                 break;
1612         case P_PGID:
1613                 if (pid <= 0)
1614                         return -EINVAL;
1615                 pid = -pid;
1616                 break;
1617         default:
1618                 return -EINVAL;
1619         }
1620
1621         ret = do_wait(pid, options, infop, NULL, ru);
1622
1623         /* avoid REGPARM breakage on x86: */
1624         prevent_tail_call(ret);
1625         return ret;
1626 }
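
/*
 * Editor's note (not part of the original file): the switch above folds
 * the (which, pid) pair into the single-pid convention that do_wait()
 * shares with sys_wait4(): -1 means any child, a positive value selects
 * one pid, and a value below -1 selects the process group -pid.  So,
 * assuming a process group "pgid", these two calls apply the same
 * eligibility test:
 *
 *	waitid(P_PGID, pgid, &info, WEXITED);
 *	wait4(-pgid, &status, 0, NULL);
 */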
1627
1628 asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
1629                           int options, struct rusage __user *ru)
1630 {
1631         long ret;
1632
1633         if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1634                         __WNOTHREAD|__WCLONE|__WALL))
1635                 return -EINVAL;
1636         ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);
1637
1638         /* avoid REGPARM breakage on x86: */
1639         prevent_tail_call(ret);
1640         return ret;
1641 }
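
/*
 * Editor's illustrative sketch (not part of the original file): wait4()
 * always waits for exits (WEXITED is or-ed in above), and the caller
 * decodes the status word with the libc macros:
 *
 *	#include <sys/resource.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WUNTRACED | WCONTINUED, &ru);
 *	if (pid > 0) {
 *		if (WIFEXITED(status))
 *			;	// WEXITSTATUS(status) is the exit code
 *		else if (WIFSTOPPED(status))
 *			;	// WSTOPSIG(status) is the stopping signal
 *		else if (WIFCONTINUED(status))
 *			;	// the 0xffff word stored by wait_task_continued()
 *	}
 */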
1642
1643 #ifdef __ARCH_WANT_SYS_WAITPID
1644
1645 /*
1646  * sys_waitpid() remains for compatibility. waitpid() should be
1647  * implemented by calling sys_wait4() from libc.a.
1648  */
1649 asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
1650 {
1651         return sys_wait4(pid, stat_addr, options, NULL);
1652 }
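
/*
 * Editor's note (not part of the original file): as the comment above
 * says, libc normally implements waitpid() via wait4(); where both
 * syscalls exist the two calls are equivalent:
 *
 *	waitpid(pid, &status, options);
 *	wait4(pid, &status, options, NULL);
 */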
1653
1654 #endif