tunnels: fix netns vs proto registration ordering
diff --git a/kernel/fork.c b/kernel/fork.c
index 2cebfb2..5b2959b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -49,6 +49,7 @@
 #include <linux/ftrace.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
+#include <linux/ksm.h>
 #include <linux/acct.h>
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
@@ -62,6 +63,8 @@
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
 #include <linux/perf_event.h>
+#include <linux/posix-timers.h>
+#include <linux/user-return-notifier.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -89,7 +92,7 @@ int nr_processes(void)
        int cpu;
        int total = 0;
 
-       for_each_online_cpu(cpu)
+       for_each_possible_cpu(cpu)
                total += per_cpu(process_counts, cpu);
 
        return total;
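
Note on the hunk above: per-CPU counters keep their last value when a CPU
goes offline, so summing only the online set undercounts by whatever is
parked on offline CPUs; iterating the possible set counts everything. A
minimal userspace model of the undercount (illustrative names, not the
kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPUS 4

    /* Per-CPU fork/exit deltas; CPU 1 accumulated 5, then was offlined. */
    static int process_counts[NR_CPUS] = { 3, 5, 2, 4 };
    static bool cpu_online[NR_CPUS]    = { true, false, true, true };

    int main(void)
    {
            int cpu, online_sum = 0, possible_sum = 0;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (cpu_online[cpu])
                            online_sum += process_counts[cpu]; /* old */
                    possible_sum += process_counts[cpu];       /* new */
            }
            printf("online: %d  possible: %d\n", online_sum, possible_sum);
            return 0;
    }

With CPU 1 offline this prints "online: 9  possible: 14"; the tasks counted
on CPU 1 simply vanish from the old sum.
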
@@ -136,9 +139,17 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+static void account_kernel_stack(struct thread_info *ti, int account)
+{
+       struct zone *zone = page_zone(virt_to_page(ti));
+
+       mod_zone_page_state(zone, NR_KERNEL_STACK, account);
+}
+
 void free_task(struct task_struct *tsk)
 {
        prop_local_destroy_single(&tsk->dirties);
+       account_kernel_stack(tsk->stack, -1);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
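
The new account_kernel_stack() pairs the -1 here with a +1 added to
dup_task_struct() later in this patch, so the NR_KERNEL_STACK zone counter
tracks the number of live thread stacks. A sketch of that charge/uncharge
pairing in plain C (all names are hypothetical stand-ins for the kernel
primitives):

    #include <stdio.h>
    #include <stdlib.h>

    static long nr_kernel_stack;        /* stands in for the zone counter */

    static void account_stack(int account)
    {
            nr_kernel_stack += account; /* kernel: mod_zone_page_state() */
    }

    static void *alloc_stack(size_t size)
    {
            void *stack = malloc(size);

            if (stack)
                    account_stack(1);   /* charge, as dup_task_struct() does */
            return stack;
    }

    static void free_stack(void *stack)
    {
            if (!stack)
                    return;
            account_stack(-1);          /* uncharge first, as free_task() does */
            free(stack);
    }

    int main(void)
    {
            void *stack = alloc_stack(8192);

            printf("live stacks: %ld\n", nr_kernel_stack); /* 1 */
            free_stack(stack);
            printf("live stacks: %ld\n", nr_kernel_stack); /* 0 */
            return 0;
    }
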
@@ -239,6 +250,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto out;
 
        setup_thread_stack(tsk, orig);
+       clear_user_return_notifier(tsk);
        stackend = end_of_stack(tsk);
        *stackend = STACK_END_MAGIC;    /* for overflow detection */
 
@@ -253,6 +265,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->btrace_seq = 0;
 #endif
        tsk->splice_pipe = NULL;
+
+       account_kernel_stack(ti, 1);
+
        return tsk;
 
 out:
@@ -288,6 +303,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;
+       retval = ksm_fork(mm, oldmm);
+       if (retval)
+               goto out;
 
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
@@ -418,22 +436,30 @@ __setup("coredump_filter=", coredump_filter_setup);
 
 #include <linux/init_task.h>
 
+static void mm_init_aio(struct mm_struct *mm)
+{
+#ifdef CONFIG_AIO
+       spin_lock_init(&mm->ioctx_lock);
+       INIT_HLIST_HEAD(&mm->ioctx_list);
+#endif
+}
+
 static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 {
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
-       mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
+       mm->flags = (current->mm) ?
+               (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
        mm->core_state = NULL;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
-       spin_lock_init(&mm->ioctx_lock);
-       INIT_HLIST_HEAD(&mm->ioctx_list);
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
+       mm_init_aio(mm);
        mm_init_owner(mm, p);
 
        if (likely(!mm_alloc_pgd(mm))) {
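
This hunk does two things: the AIO context fields move into mm_init_aio()
so they are only initialized when CONFIG_AIO is set, and a forked mm now
inherits only the flag bits covered by MMF_INIT_MASK instead of the
parent's whole flags word. A toy model of the masked inheritance, with
made-up bit names:

    #include <stdio.h>

    /* Hypothetical layout: low bits are inheritable policy (such as the
     * coredump filter), high bits are runtime state that must start clean. */
    #define MMF_DUMP_FILTER 0x00ffu
    #define MMF_RUNTIME_BIT 0x0100u
    #define MMF_INIT_MASK   MMF_DUMP_FILTER

    static unsigned int inherit_flags(unsigned int parent_flags)
    {
            return parent_flags & MMF_INIT_MASK; /* was: parent_flags verbatim */
    }

    int main(void)
    {
            unsigned int parent = 0x0023u | MMF_RUNTIME_BIT;

            printf("parent %#x -> child %#x\n", parent, inherit_flags(parent));
            return 0;
    }
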
@@ -485,6 +511,7 @@ void mmput(struct mm_struct *mm)
 
        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
+               ksm_exit(mm);
                exit_mmap(mm);
                set_mm_exe_file(mm, NULL);
                if (!list_empty(&mm->mmlist)) {
@@ -493,6 +520,8 @@ void mmput(struct mm_struct *mm)
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
+               if (mm->binfmt)
+                       module_put(mm->binfmt->module);
                mmdrop(mm);
        }
 }
@@ -543,12 +572,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 
        /* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
-       if (unlikely(tsk->robust_list))
+       if (unlikely(tsk->robust_list)) {
                exit_robust_list(tsk);
+               tsk->robust_list = NULL;
+       }
 #ifdef CONFIG_COMPAT
-       if (unlikely(tsk->compat_robust_list))
+       if (unlikely(tsk->compat_robust_list)) {
                compat_exit_robust_list(tsk);
+               tsk->compat_robust_list = NULL;
+       }
 #endif
+       if (unlikely(!list_empty(&tsk->pi_state_list)))
+               exit_pi_state_list(tsk);
 #endif
 
        /* Get rid of any cached register state */
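
mm_release() runs on both exec and exit, so a robust list consumed at exec
time could previously be walked a second time at exit through a stale
user-space pointer; NULLing the list heads makes the cleanup idempotent.
The same consume-and-clear shape in miniature (hypothetical structures):

    #include <stdio.h>
    #include <stddef.h>

    struct robust_entry {
            struct robust_entry *next;
    };

    static void walk_robust_list(struct robust_entry *head)
    {
            for (; head; head = head->next)
                    printf("waking waiter at %p\n", (void *)head);
    }

    /* Called on both exec and exit, like mm_release(). Clearing the head
     * after the first walk makes the second call a harmless no-op. */
    static void release_futexes(struct robust_entry **headp)
    {
            if (*headp) {
                    walk_robust_list(*headp);
                    *headp = NULL;  /* the fix: drop the stale pointer */
            }
    }

    int main(void)
    {
            struct robust_entry e2 = { NULL }, e1 = { &e2 };
            struct robust_entry *head = &e1;

            release_futexes(&head); /* exec */
            release_futexes(&head); /* exit: nothing left to do */
            return 0;
    }
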
@@ -618,9 +653,14 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;
 
+       if (mm->binfmt && !try_module_get(mm->binfmt->module))
+               goto free_pt;
+
        return mm;
 
 free_pt:
+       /* don't put binfmt in mmput, we haven't got module yet */
+       mm->binfmt = NULL;
        mmput(mm);
 
 fail_nomem:
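
Together with the module_put() added to mmput() above, this moves the
binfmt module reference from the task to the mm: dup_mm() takes its own
reference, and if the get fails, clearing ->binfmt before the mmput() in
the free_pt path keeps it from dropping a reference that was never taken.
The ownership pattern with a plain refcount (hypothetical types, not the
kernel's):

    #include <stdio.h>
    #include <stdbool.h>

    struct module { int refs; };

    static bool try_module_get(struct module *m)
    {
            if (!m)
                    return false;
            m->refs++;      /* the kernel version can fail during unload */
            return true;
    }

    static void module_put(struct module *m)
    {
            if (m)
                    m->refs--;
    }

    struct mm { struct module *binfmt_module; };

    static void mmput(struct mm *mm)
    {
            if (mm->binfmt_module)  /* as in the mmput() hunk */
                    module_put(mm->binfmt_module);
    }

    static int dup_mm(struct mm *mm, struct module *binfmt)
    {
            mm->binfmt_module = binfmt;
            if (mm->binfmt_module && !try_module_get(mm->binfmt_module)) {
                    mm->binfmt_module = NULL; /* don't put what we never got */
                    mmput(mm);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct module fmt = { 0 };
            struct mm mm;

            if (!dup_mm(&mm, &fmt))
                    mmput(&mm);
            printf("refs: %d\n", fmt.refs); /* balanced: 0 */
            return 0;
    }
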
@@ -788,10 +828,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
        thread_group_cputime_init(sig);
 
        /* Expiration times and increments. */
-       sig->it_virt_expires = cputime_zero;
-       sig->it_virt_incr = cputime_zero;
-       sig->it_prof_expires = cputime_zero;
-       sig->it_prof_incr = cputime_zero;
+       sig->it[CPUCLOCK_PROF].expires = cputime_zero;
+       sig->it[CPUCLOCK_PROF].incr = cputime_zero;
+       sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
+       sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
 
        /* Cached expiration times. */
        sig->cputime_expires.prof_exp = cputime_zero;
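
Folding the four parallel it_{prof,virt}_{expires,incr} fields into an
array indexed by clock id lets the itimer code be written once per
operation and selected by CPUCLOCK_PROF or CPUCLOCK_VIRT, instead of being
duplicated per clock. Roughly the shape of the refactor (simplified types):

    #include <stdio.h>

    enum { CPUCLOCK_PROF, CPUCLOCK_VIRT, NCLOCKS };

    struct cpu_itimer {
            unsigned long long expires;
            unsigned long long incr;
    };

    struct signal_struct {
            struct cpu_itimer it[NCLOCKS]; /* replaces it_prof_*, it_virt_* */
    };

    /* One helper now serves both clocks instead of two copies of it. */
    static void arm_itimer(struct signal_struct *sig, int clock,
                           unsigned long long expires, unsigned long long incr)
    {
            sig->it[clock].expires = expires;
            sig->it[clock].incr = incr;
    }

    int main(void)
    {
            struct signal_struct sig = { { { 0, 0 }, { 0, 0 } } };

            arm_itimer(&sig, CPUCLOCK_PROF, 1000, 100);
            arm_itimer(&sig, CPUCLOCK_VIRT, 2000, 200);
            printf("prof %llu, virt %llu\n",
                   sig.it[CPUCLOCK_PROF].expires,
                   sig.it[CPUCLOCK_VIRT].expires);
            return 0;
    }
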
@@ -846,9 +886,13 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->gtime = cputime_zero;
        sig->cgtime = cputime_zero;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+       sig->prev_utime = sig->prev_stime = cputime_zero;
+#endif
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
+       sig->maxrss = sig->cmaxrss = 0;
        task_io_accounting_init(&sig->ioac);
        sig->sum_sched_runtime = 0;
        taskstats_tgid_init(sig);
@@ -863,6 +907,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
        tty_audit_fork(sig);
 
+       sig->oom_adj = current->signal->oom_adj;
+
        return 0;
 }
 
@@ -893,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 
 static void rt_mutex_init_task(struct task_struct *p)
 {
-       spin_lock_init(&p->pi_lock);
+       raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&p->pi_waiters, &p->pi_lock);
+       plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
 #endif
 }
@@ -958,6 +1004,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);
 
+       /*
+        * Siblings of global init remain as zombies on exit since they are
+        * not reaped by their parent (swapper). To solve this and to avoid
+        * multi-rooted process trees, prevent global and container-inits
+        * from creating siblings.
+        */
+       if ((clone_flags & CLONE_PARENT) &&
+                               current->signal->flags & SIGNAL_UNKILLABLE)
+               return ERR_PTR(-EINVAL);
+
        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;
@@ -999,9 +1055,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;
 
-       if (p->binfmt && !try_module_get(p->binfmt->module))
-               goto bad_fork_cleanup_put_domain;
-
        p->did_exec = 0;
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
@@ -1018,8 +1071,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->gtime = cputime_zero;
        p->utimescaled = cputime_zero;
        p->stimescaled = cputime_zero;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        p->prev_utime = cputime_zero;
        p->prev_stime = cputime_zero;
+#endif
 
        p->default_timer_slack_ns = current->timer_slack_ns;
 
@@ -1072,9 +1127,15 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+       p->memcg_batch.do_batch = 0;
+       p->memcg_batch.memcg = NULL;
+#endif
 
        p->bts = NULL;
 
+       p->stack_start = stack_start;
+
        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);
 
@@ -1149,9 +1210,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                p->sas_ss_sp = p->sas_ss_size = 0;
 
        /*
-        * Syscall tracing should be turned off in the child regardless
-        * of CLONE_PTRACE.
+        * Syscall tracing and stepping should be turned off in the
+        * child regardless of CLONE_PTRACE.
         */
+       user_disable_single_step(p);
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
 #ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
@@ -1229,7 +1291,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        }
 
        if (likely(p->pid)) {
-               list_add_tail(&p->sibling, &p->real_parent->children);
                tracehook_finish_clone(p, clone_flags, trace);
 
                if (thread_group_leader(p)) {
@@ -1241,6 +1302,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        p->signal->tty = tty_kref_get(current->signal->tty);
                        attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        attach_pid(p, PIDTYPE_SID, task_session(current));
+                       list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
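
Moving the list_add_tail() into the thread_group_leader() branch means
sub-threads are no longer linked on their parent's ->children list,
presumably because wait() only ever reaps whole thread groups, so scans of
that list no longer wade through individual threads. A toy model of the
narrowed list (illustrative structures only):

    #include <stdio.h>
    #include <stdbool.h>

    struct task {
            const char *comm;
            bool group_leader;
            struct task *sibling;   /* next entry on parent's children list */
    };

    static struct task *children;   /* parent's ->children list head */

    /* Only thread-group leaders get linked, as in the hunk above. */
    static void add_child(struct task *t)
    {
            if (t->group_leader) {
                    t->sibling = children;
                    children = t;
            }
    }

    int main(void)
    {
            struct task leader = { "worker",   true,  NULL };
            struct task thread = { "worker/1", false, NULL };
            struct task *t;

            add_child(&leader);
            add_child(&thread);     /* not listed: wait() never reaps it */

            for (t = children; t; t = t->sibling)
                    printf("wait() scans: %s\n", t->comm);
            return 0;
    }
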
@@ -1260,7 +1322,8 @@ bad_fork_free_pid:
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_io:
-       put_io_context(p->io_context);
+       if (p->io_context)
+               exit_io_context(p);
 bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
 bad_fork_cleanup_mm:
@@ -1287,9 +1350,6 @@ bad_fork_cleanup_cgroup:
 #endif
        cgroup_exit(p, cgroup_callbacks_done);
        delayacct_tsk_free(p);
-       if (p->binfmt)
-               module_put(p->binfmt->module);
-bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);