Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/exit.c b/kernel/exit.c
index a7799d8..f80dec3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
 #include <linux/futex.h>
-#include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/audit.h> /* for audit_free() */
 #include <linux/resource.h>
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/tracehook.h>
+#include <linux/init_task.h>
+#include <trace/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include "cred-internals.h"
+
+DEFINE_TRACE(sched_process_free);
+DEFINE_TRACE(sched_process_exit);
+DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
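
The three DEFINE_TRACE() lines above instantiate the scheduler tracepoints
declared in <trace/sched.h>. Any kernel code can attach a probe at runtime;
a minimal sketch against the 2.6.28-era tracepoint API (the probe name and
printk format are illustrative, not part of this patch):

    #include <linux/module.h>
    #include <trace/sched.h>

    /* Runs on every trace_sched_process_exit(tsk) hit. */
    static void probe_sched_process_exit(struct task_struct *tsk)
    {
            printk(KERN_INFO "exit: pid=%d comm=%s\n",
                   task_pid_nr(tsk), tsk->comm);
    }

    static int __init probe_init(void)
    {
            /* Non-zero return means the tracepoint does not exist. */
            return register_trace_sched_process_exit(probe_sched_process_exit);
    }

    static void __exit probe_cleanup(void)
    {
            unregister_trace_sched_process_exit(probe_sched_process_exit);
            /* Wait for in-flight probes before the module text goes away. */
            tracepoint_synchronize_unregister();
    }

    module_init(probe_init);
    module_exit(probe_cleanup);
    MODULE_LICENSE("GPL");
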
@@ -111,16 +118,14 @@ static void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
-               sig->utime = cputime_add(sig->utime, tsk->utime);
-               sig->stime = cputime_add(sig->stime, tsk->stime);
-               sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+               sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
-               sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+               task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig = NULL; /* Marker for below. */
        }
 
@@ -141,36 +146,37 @@ static void __exit_signal(struct task_struct *tsk)
        if (sig) {
                flush_sigqueue(&sig->shared_pending);
                taskstats_tgid_free(sig);
+               /*
+                * Make sure ->signal can't go away under rq->lock,
+                * see account_group_exec_runtime().
+                */
+               task_rq_unlock_wait(tsk);
                __cleanup_signal(sig);
        }
 }
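
task_rq_unlock_wait() is introduced elsewhere in this series (kernel/sched.c);
it simply waits until the task's runqueue lock has been released, so that a
concurrent account_group_exec_runtime() running under rq->lock cannot still
be touching ->signal when __cleanup_signal() frees it. Paraphrased for
reference:

    /* Paraphrased from kernel/sched.c in this series. */
    void task_rq_unlock_wait(struct task_struct *p)
    {
            struct rq *rq = task_rq(p);

            smp_mb(); /* spin_unlock_wait() is not a full memory barrier */
            spin_unlock_wait(&rq->lock);
    }
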
 
 static void delayed_put_task_struct(struct rcu_head *rhp)
 {
-       put_task_struct(container_of(rhp, struct task_struct, rcu));
-}
+       struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-/*
- * Do final ptrace-related cleanup of a zombie being reaped.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_release_task(struct task_struct *p)
-{
-       BUG_ON(!list_empty(&p->ptraced));
-       ptrace_unlink(p);
-       BUG_ON(!list_empty(&p->ptrace_entry));
+       trace_sched_process_free(tsk);
+       put_task_struct(tsk);
 }
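
delayed_put_task_struct() now fires the sched_process_free tracepoint before
dropping the final reference. It is still invoked through RCU; later in
release_task() (unchanged by this hunk, paraphrased below) the callback is
queued so the task_struct is only freed after a grace period, keeping it safe
for lock-free tasklist walkers:

    /* Tail of release_task(), for context (paraphrased). */
    release_thread(p);
    call_rcu(&p->rcu, delayed_put_task_struct);

    p = leader;
    if (unlikely(zap_leader))
            goto repeat;
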
 
+
 void release_task(struct task_struct * p)
 {
        struct task_struct *leader;
        int zap_leader;
 repeat:
-       atomic_dec(&p->user->processes);
+       tracehook_prepare_release_task(p);
+       /* We don't need the RCU read lock here - the process is dead and
+        * can't be modifying its own credentials. */
+       atomic_dec(&__task_cred(p)->user->processes);
+
        proc_flush_task(p);
        write_lock_irq(&tasklist_lock);
-       ptrace_release_task(p);
+       tracehook_finish_release_task(p);
        __exit_signal(p);
 
        /*
@@ -192,6 +198,13 @@ repeat:
                 * that case.
                 */
                zap_leader = task_detached(leader);
+
+               /*
+                * This maintains the invariant that release_task()
+                * only runs on a task in EXIT_DEAD, just for sanity.
+                */
+               if (zap_leader)
+                       leader->exit_state = EXIT_DEAD;
        }
 
        write_unlock_irq(&tasklist_lock);
@@ -335,12 +348,12 @@ static void reparent_to_kthreadd(void)
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
-       security_task_reparent_to_init(current);
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));
-       atomic_inc(&(INIT_USER->__count));
+
+       atomic_inc(&init_cred.usage);
+       commit_creds(&init_cred);
        write_unlock_irq(&tasklist_lock);
-       switch_uid(INIT_USER);
 }
 
 void __set_special_pids(struct pid *pid)
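
reparent_to_kthreadd() now adopts the static init_cred directly instead of
calling switch_uid(INIT_USER): since init_cred is never freed, bumping its
usage count and committing it is safe. Ordinary code would go through the
prepare/commit pair of the new credentials API; a minimal sketch of that
general pattern (the helper and the "nobody" uid are hypothetical, not part
of this patch):

    #include <linux/cred.h>

    static int drop_to_nobody(void)         /* hypothetical helper */
    {
            struct cred *new = prepare_creds();

            if (!new)
                    return -ENOMEM;
            new->fsuid = 65534;             /* assumption: "nobody" */
            return commit_creds(new);       /* consumes the reference */
    }
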
@@ -430,7 +443,7 @@ void daemonize(const char *name, ...)
         * We don't want to have TIF_FREEZE set if the system-wide hibernation
         * or suspend transition begins right now.
         */
-       current->flags |= PF_NOFREEZE;
+       current->flags |= (PF_NOFREEZE | PF_KTHREAD);
 
        if (current->nsproxy != &init_nsproxy) {
                get_nsproxy(&init_nsproxy);
@@ -555,8 +568,6 @@ void put_fs_struct(struct fs_struct *fs)
        if (atomic_dec_and_test(&fs->count)) {
                path_put(&fs->root);
                path_put(&fs->pwd);
-               if (fs->altroot.dentry)
-                       path_put(&fs->altroot);
                kmem_cache_free(fs_cachep, fs);
        }
 }
@@ -586,8 +597,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
         * If there are other users of the mm and the owner (us) is exiting
         * we need to find a new owner to take on the responsibility.
         */
-       if (!mm)
-               return 0;
        if (atomic_read(&mm->mm_users) <= 1)
                return 0;
        if (mm->owner != p)
@@ -630,6 +639,12 @@ retry:
        } while_each_thread(g, c);
 
        read_unlock(&tasklist_lock);
+       /*
+        * We found no owner, yet mm_users > 1: this implies that we are
+        * most likely racing with swapoff (try_to_unuse()) or /proc or
+        * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
+        */
+       mm->owner = NULL;
        return;
 
 assign_new_owner:
@@ -650,7 +665,6 @@ assign_new_owner:
                put_task_struct(c);
                goto retry;
        }
-       cgroup_mm_owner_callbacks(mm->owner, c);
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
@@ -664,26 +678,40 @@ assign_new_owner:
 static void exit_mm(struct task_struct * tsk)
 {
        struct mm_struct *mm = tsk->mm;
+       struct core_state *core_state;
 
        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
-        * We must hold mmap_sem around checking core_waiters
+        * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
-        * will increment core_waiters for each thread in the
+        * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
-       if (mm->core_waiters) {
+       core_state = mm->core_state;
+       if (core_state) {
+               struct core_thread self;
                up_read(&mm->mmap_sem);
-               down_write(&mm->mmap_sem);
-               if (!--mm->core_waiters)
-                       complete(mm->core_startup_done);
-               up_write(&mm->mmap_sem);
 
-               wait_for_completion(&mm->core_done);
+               self.task = tsk;
+               self.next = xchg(&core_state->dumper.next, &self);
+               /*
+                * Implies mb(), the result of xchg() must be visible
+                * to core_state->dumper.
+                */
+               if (atomic_dec_and_test(&core_state->nr_threads))
+                       complete(&core_state->startup);
+
+               for (;;) {
+                       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+                       if (!self.task) /* see coredump_finish() */
+                               break;
+                       schedule();
+               }
+               __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
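
The other half of this handshake lives in the coredump path: each exiting
thread pushes a struct core_thread onto core_state->dumper with xchg() and
sleeps until the dumper clears self.task. Paraphrased from coredump_finish()
in fs/exec.c of the same series:

    static void coredump_finish(struct mm_struct *mm)
    {
            struct core_thread *curr, *next;
            struct task_struct *task;

            next = mm->core_state->dumper.next;
            while ((curr = next) != NULL) {
                    next = curr->next;
                    task = curr->task;
                    /*
                     * The exiting thread checks self.task locklessly;
                     * it must not see ->task == NULL before we have
                     * read ->next (pairs with the loop in exit_mm()).
                     */
                    smp_mb();
                    curr->task = NULL;
                    wake_up_process(task);
            }
            mm->core_state = NULL;
    }
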
@@ -820,26 +848,50 @@ static void reparent_thread(struct task_struct *p, struct task_struct *father)
 * the child reaper process (i.e. "init") in our pid
  * space.
  */
+static struct task_struct *find_new_reaper(struct task_struct *father)
+{
+       struct pid_namespace *pid_ns = task_active_pid_ns(father);
+       struct task_struct *thread;
+
+       thread = father;
+       while_each_thread(father, thread) {
+               if (thread->flags & PF_EXITING)
+                       continue;
+               if (unlikely(pid_ns->child_reaper == father))
+                       pid_ns->child_reaper = thread;
+               return thread;
+       }
+
+       if (unlikely(pid_ns->child_reaper == father)) {
+               write_unlock_irq(&tasklist_lock);
+               if (unlikely(pid_ns == &init_pid_ns))
+                       panic("Attempted to kill init!");
+
+               zap_pid_ns_processes(pid_ns);
+               write_lock_irq(&tasklist_lock);
+               /*
+                * We cannot clear ->child_reaper or leave it alone.
+                * There may be stealth EXIT_DEAD tasks on ->children,
+                * forget_original_parent() must move them somewhere.
+                */
+               pid_ns->child_reaper = init_pid_ns.child_reaper;
+       }
+
+       return pid_ns->child_reaper;
+}
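
find_new_reaper() first tries every other thread in the dying parent's
group, skipping those already in PF_EXITING. For reference, the iterator is
just a circular walk over the thread group (from <linux/sched.h> at this
point in the tree):

    #define while_each_thread(g, t) \
            while ((t = next_thread(t)) != g)

Only when no live sibling exists does it fall back to the pid namespace's
child_reaper, zapping the whole namespace first if the reaper itself is the
one dying.
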
+
 static void forget_original_parent(struct task_struct *father)
 {
-       struct task_struct *p, *n, *reaper = father;
+       struct task_struct *p, *n, *reaper;
        LIST_HEAD(ptrace_dead);
 
        write_lock_irq(&tasklist_lock);
-
+       reaper = find_new_reaper(father);
        /*
         * First clean up ptrace if we were using it.
         */
        ptrace_exit(father, &ptrace_dead);
 
-       do {
-               reaper = next_thread(reaper);
-               if (reaper == father) {
-                       reaper = task_child_reaper(father);
-                       break;
-               }
-       } while (reaper->flags & PF_EXITING);
-
        list_for_each_entry_safe(p, n, &father->children, sibling) {
                p->real_parent = reaper;
                if (p->parent == father) {
@@ -861,7 +913,8 @@ static void forget_original_parent(struct task_struct *father)
  */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-       int state;
+       int signal;
+       void *cookie;
 
        /*
         * This does two things:
@@ -898,33 +951,24 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
            !capable(CAP_KILL))
                tsk->exit_signal = SIGCHLD;
 
-       /* If something other than our normal parent is ptracing us, then
-        * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
-        * only has special meaning to our real parent.
-        */
-       if (!task_detached(tsk) && thread_group_empty(tsk)) {
-               int signal = ptrace_reparented(tsk) ?
-                               SIGCHLD : tsk->exit_signal;
-               do_notify_parent(tsk, signal);
-       } else if (tsk->ptrace) {
-               do_notify_parent(tsk, SIGCHLD);
-       }
+       signal = tracehook_notify_death(tsk, &cookie, group_dead);
+       if (signal >= 0)
+               signal = do_notify_parent(tsk, signal);
 
-       state = EXIT_ZOMBIE;
-       if (task_detached(tsk) && likely(!tsk->ptrace))
-               state = EXIT_DEAD;
-       tsk->exit_state = state;
+       tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
        /* mt-exec, de_thread() is waiting for us */
        if (thread_group_leader(tsk) &&
-           tsk->signal->notify_count < 0 &&
-           tsk->signal->group_exit_task)
+           tsk->signal->group_exit_task &&
+           tsk->signal->notify_count < 0)
                wake_up_process(tsk->signal->group_exit_task);
 
        write_unlock_irq(&tasklist_lock);
 
+       tracehook_report_death(tsk, signal, cookie, group_dead);
+
        /* If the process is dead, release it - nobody will wait for it */
-       if (state == EXIT_DEAD)
+       if (signal == DEATH_REAP)
                release_task(tsk);
 }
 
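
tracehook_notify_death() folds the old open-coded notification logic into
<linux/tracehook.h>: it returns the signal to send to the parent, or a
negative DEATH_* code. As defined in the tracehook series:

    /* From <linux/tracehook.h>, reproduced for reference. */
    #define DEATH_REAP                      -1
    #define DEATH_DELAYED_GROUP_LEADER      -2

A DEATH_REAP result - whether from the hook or from do_notify_parent() when
the parent ignores SIGCHLD - means nobody will wait for the task, so it goes
straight to EXIT_DEAD and release_task() runs immediately.
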
@@ -956,39 +1000,6 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
-static inline void exit_child_reaper(struct task_struct *tsk)
-{
-       if (likely(tsk->group_leader != task_child_reaper(tsk)))
-               return;
-
-       if (tsk->nsproxy->pid_ns == &init_pid_ns)
-               panic("Attempted to kill init!");
-
-       /*
-        * @tsk is the last thread in the 'cgroup-init' and is exiting.
-        * Terminate all remaining processes in the namespace and reap them
-        * before exiting @tsk.
-        *
-        * Note that @tsk (last thread of cgroup-init) may not necessarily
-        * be the child-reaper (i.e main thread of cgroup-init) of the
-        * namespace i.e the child_reaper may have already exited.
-        *
-        * Even after a child_reaper exits, we let it inherit orphaned children,
-        * because, pid_ns->child_reaper remains valid as long as there is
-        * at least one living sub-thread in the cgroup init.
-
-        * This living sub-thread of the cgroup-init will be notified when
-        * a child inherited by the 'child-reaper' exits (do_notify_parent()
-        * uses __group_send_sig_info()). Further, when reaping child processes,
-        * do_wait() iterates over children of all living sub threads.
-
-        * i.e even though 'child_reaper' thread is listed as the parent of the
-        * orphaned children, any living sub-thread in the cgroup-init can
-        * perform the role of the child_reaper.
-        */
-       zap_pid_ns_processes(tsk->nsproxy->pid_ns);
-}
-
 NORET_TYPE void do_exit(long code)
 {
        struct task_struct *tsk = current;
@@ -1003,10 +1014,7 @@ NORET_TYPE void do_exit(long code)
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");
 
-       if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
-               current->ptrace_message = code;
-               ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
-       }
+       tracehook_report_exit(&code);
 
        /*
         * We're taking recursive faults here in do_exit. Safest is to just
@@ -1025,8 +1033,6 @@ NORET_TYPE void do_exit(long code)
                 * task into the wait for ever nirwana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
-               if (tsk->io_context)
-                       exit_io_context();
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }
@@ -1045,25 +1051,13 @@ NORET_TYPE void do_exit(long code)
                                preempt_count());
 
        acct_update_integrals(tsk);
-       if (tsk->mm) {
-               update_hiwater_rss(tsk->mm);
-               update_hiwater_vm(tsk->mm);
-       }
+
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
-               exit_child_reaper(tsk);
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
        }
        acct_collect(code, group_dead);
-#ifdef CONFIG_FUTEX
-       if (unlikely(tsk->robust_list))
-               exit_robust_list(tsk);
-#ifdef CONFIG_COMPAT
-       if (unlikely(tsk->compat_robust_list))
-               compat_exit_robust_list(tsk);
-#endif
-#endif
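
The robust-futex handling removed here is not dropped: this series moves it
into mm_release() in kernel/fork.c, so it also runs on exec and before the
address space is torn down. Paraphrased, mm_release() now begins with:

    /* Paraphrased from mm_release() (kernel/fork.c) after this series. */
    #ifdef CONFIG_FUTEX
            if (unlikely(tsk->robust_list))
                    exit_robust_list(tsk);
    #ifdef CONFIG_COMPAT
            if (unlikely(tsk->compat_robust_list))
                    compat_exit_robust_list(tsk);
    #endif
    #endif
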
        if (group_dead)
                tty_audit_exit();
        if (unlikely(tsk->audit_context))
@@ -1076,13 +1070,14 @@ NORET_TYPE void do_exit(long code)
 
        if (group_dead)
                acct_process();
+       trace_sched_process_exit(tsk);
+
        exit_sem(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();
        cgroup_exit(tsk, 1);
-       exit_keys(tsk);
 
        if (group_dead && tsk->signal->leader)
                disassociate_ctty(1);
@@ -1127,7 +1122,6 @@ NORET_TYPE void do_exit(long code)
        preempt_disable();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
-
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
@@ -1147,7 +1141,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 
 EXPORT_SYMBOL(complete_and_exit);
 
-asmlinkage long sys_exit(int error_code)
+SYSCALL_DEFINE1(exit, int, error_code)
 {
        do_exit((error_code&0xff)<<8);
 }
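
SYSCALL_DEFINEn() replaces the open-coded asmlinkage definitions here and
below. On most architectures it expands to exactly the old form; where
CONFIG_HAVE_SYSCALL_WRAPPERS is set, it additionally generates a wrapper
that takes every argument as a long and truncates it to the declared type,
closing 64-bit sign-extension holes. A simplified sketch of what
SYSCALL_DEFINE1(exit, int, error_code) generates in the wrapped case (see
<linux/syscalls.h> for the real macro, which also forward-declares the
SYSC_ body):

    asmlinkage long SyS_exit(long error_code)
    {
            /* Truncate to the declared type, then call the real body. */
            return SYSC_exit((int) error_code);
    }
    SYSCALL_ALIAS(sys_exit, SyS_exit);

    static inline long SYSC_exit(int error_code)
    {
            do_exit((error_code & 0xff) << 8);
    }
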
@@ -1188,9 +1182,11 @@ do_group_exit(int exit_code)
  * wait4()-ing process will get the correct exit code - even if this
  * thread is not the thread group leader.
  */
-asmlinkage void sys_exit_group(int error_code)
+SYSCALL_DEFINE1(exit_group, int, error_code)
 {
        do_group_exit((error_code & 0xff) << 8);
+       /* NOTREACHED */
+       return 0;
 }
 
 static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
@@ -1267,12 +1263,12 @@ static int wait_task_zombie(struct task_struct *p, int options,
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
+       uid_t uid = __task_cred(p)->uid;
 
        if (!likely(options & WEXITED))
                return 0;
 
        if (unlikely(options & WNOWAIT)) {
-               uid_t uid = p->uid;
                int exit_code = p->exit_code;
                int why, status;
 
@@ -1304,6 +1300,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
        if (likely(!traced)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
+               struct task_cputime cputime;
 
                /*
                 * The resource counters for the group leader are in its
@@ -1319,20 +1316,23 @@ static int wait_task_zombie(struct task_struct *p, int options,
                 * need to protect the access to p->parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
+                *
+                * We use thread_group_cputime() to get times for the thread
+                * group, which consolidates times for all threads in the
+                * group including the group leader.
                 */
+               thread_group_cputime(p, &cputime);
                spin_lock_irq(&p->parent->sighand->siglock);
                psig = p->parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
-                       cputime_add(p->utime,
-                       cputime_add(sig->utime,
-                                   sig->cutime)));
+                       cputime_add(cputime.utime,
+                                   sig->cutime));
                psig->cstime =
                        cputime_add(psig->cstime,
-                       cputime_add(p->stime,
-                       cputime_add(sig->stime,
-                                   sig->cstime)));
+                       cputime_add(cputime.stime,
+                                   sig->cstime));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
@@ -1352,6 +1352,8 @@ static int wait_task_zombie(struct task_struct *p, int options,
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
+               task_io_accounting_add(&psig->ioac, &p->ioac);
+               task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->parent->sighand->siglock);
        }
 
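
thread_group_cputime() comes from the per-process timer rework in this
series: it returns the group's consolidated utime, stime and scheduler
runtime in one struct, replacing the old open-coded p->utime + sig->utime
sums. The accumulator type, for reference:

    /* From <linux/sched.h> in this series. */
    struct task_cputime {
            cputime_t utime;                        /* user time */
            cputime_t stime;                        /* system time */
            unsigned long long sum_exec_runtime;    /* scheduler runtime */
    };
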
@@ -1387,7 +1389,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
-               retval = put_user(p->uid, &infop->si_uid);
+               retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
 
@@ -1452,7 +1454,8 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
        if (!unlikely(options & WNOWAIT))
                p->exit_code = 0;
 
-       uid = p->uid;
+       /* don't need the RCU read lock here as we're holding a spinlock */
+       uid = __task_cred(p)->uid;
 unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
@@ -1526,10 +1529,10 @@ static int wait_task_continued(struct task_struct *p, int options,
        }
        if (!unlikely(options & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
+       uid = __task_cred(p)->uid;
        spin_unlock_irq(&p->sighand->siglock);
 
        pid = task_pid_vnr(p);
-       uid = p->uid;
        get_task_struct(p);
        read_unlock(&tasklist_lock);
 
@@ -1675,6 +1678,8 @@ static long do_wait(enum pid_type type, struct pid *pid, int options,
        struct task_struct *tsk;
        int retval;
 
+       trace_sched_process_wait(pid);
+
        add_wait_queue(&current->signal->wait_chldexit,&wait);
 repeat:
        /*
@@ -1749,9 +1754,8 @@ end:
        return retval;
 }
 
-asmlinkage long sys_waitid(int which, pid_t upid,
-                          struct siginfo __user *infop, int options,
-                          struct rusage __user *ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+               infop, int, options, struct rusage __user *, ru)
 {
        struct pid *pid = NULL;
        enum pid_type type;
@@ -1790,8 +1794,8 @@ asmlinkage long sys_waitid(int which, pid_t upid,
        return ret;
 }
 
-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
-                         int options, struct rusage __user *ru)
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+               int, options, struct rusage __user *, ru)
 {
        struct pid *pid = NULL;
        enum pid_type type;
@@ -1828,7 +1832,7 @@ asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
  * sys_waitpid() remains for compatibility. waitpid() should be
  * implemented by calling sys_wait4() from libc.a.
  */
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
+SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
 {
        return sys_wait4(pid, stat_addr, options, NULL);
 }