[PATCH] slab: remove kmem_cache_t
diff --git a/kernel/fork.c b/kernel/fork.c
index b18d645..2cf74ed 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -11,7 +11,6 @@
  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
  */
 
-#include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/unistd.h>
@@ -28,13 +27,19 @@
 #include <linux/binfmts.h>
 #include <linux/mman.h>
 #include <linux/fs.h>
+#include <linux/nsproxy.h>
+#include <linux/capability.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/security.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
+#include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
+#include <linux/delayacct.h>
+#include <linux/taskstats_kern.h>
+#include <linux/random.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -61,9 +66,7 @@ int max_threads;              /* tunable limit on nr_threads */
 
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
- __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
-
-EXPORT_SYMBOL(tasklist_lock);
+__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
 int nr_processes(void)
 {
@@ -79,30 +82,31 @@ int nr_processes(void)
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 # define alloc_task_struct()   kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
 # define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
-static kmem_cache_t *task_struct_cachep;
+static struct kmem_cache *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static struct kmem_cache *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
+struct kmem_cache *sighand_cachep;
 
 /* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
+struct kmem_cache *files_cachep;
 
 /* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
+struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
+struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
-static kmem_cache_t *mm_cachep;
+static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
        free_thread_info(tsk->thread_info);
+       rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
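
The conversion in this hunk is purely mechanical: kmem_cache_t was nothing
more than "typedef struct kmem_cache kmem_cache_t;" in <linux/slab.h>, so
spelling out the struct tag changes no semantics. A before/after sketch of
one declaration and call site:

    /* before: opaque typedef */
    static kmem_cache_t *task_struct_cachep;
    tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);

    /* after: identical object code, but the type's identity is explicit */
    static struct kmem_cache *task_struct_cachep;
    tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
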
@@ -113,11 +117,10 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);
 
-       if (unlikely(tsk->audit_context))
-               audit_free(tsk);
        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);
+       delayacct_tsk_free(tsk);
 
        if (!profile_handoff_task(tsk))
                free_task(tsk);
@@ -175,9 +178,17 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->thread_info = ti;
        setup_thread_stack(tsk, orig);
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+       tsk->stack_canary = get_random_int();
+#endif
+
        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage,2);
        atomic_set(&tsk->fs_excl, 0);
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+       tsk->btrace_seq = 0;
+#endif
+       tsk->splice_pipe = NULL;
        return tsk;
 }
 
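
The canary is chosen per task at fork time, so a value leaked from one
process is useless against another. As an illustrative C sketch of what
gcc's -fstack-protector does around a protected function (the real check
is emitted as arch-specific prologue/epilogue assembly):

    void f(void)
    {
            /* prologue: stash the task's canary beyond the local buffers */
            unsigned long canary = current->stack_canary;
            char buf[64];

            /* ... an overflow of buf tramples the stashed copy ... */

            /* epilogue: recheck before returning */
            if (canary != current->stack_canary)
                    __stack_chk_fail();     /* does not return */
    }
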
@@ -192,7 +203,10 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
        down_write(&oldmm->mmap_sem);
        flush_cache_mm(oldmm);
-       down_write(&mm->mmap_sem);
+       /*
+        * Not linked in yet - no deadlock potential:
+        */
+       down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 
        mm->locked_vm = 0;
        mm->mmap = NULL;
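
Both semaphores belong to the same lock class here (mmap_sem), which would
normally make lockdep report the second down_write() as a self-deadlock.
The _nested variant files the inner acquisition under a separate subclass,
which is safe precisely because the new mm is not yet reachable by anyone
else:

    down_write(&oldmm->mmap_sem);                           /* subclass 0 */
    down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); /* subclass 1 */
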
@@ -223,7 +237,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                                goto fail_nomem;
                        charge = len;
                }
-               tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
@@ -305,7 +319,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
-#define allocate_mm()  (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)    (kmem_cache_free(mm_cachep, (mm)))
 
 #include <linux/init_task.h>
@@ -367,6 +381,8 @@ void fastcall __mmdrop(struct mm_struct *mm)
  */
 void mmput(struct mm_struct *mm)
 {
+       might_sleep();
+
        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
@@ -445,6 +461,59 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        }
 }
 
+/*
+ * Allocate a new mm structure and copy the contents of current's mm
+ * into it; tsk is the task the new mm will belong to.
+ */
+static struct mm_struct *dup_mm(struct task_struct *tsk)
+{
+       struct mm_struct *mm, *oldmm = current->mm;
+       int err;
+
+       if (!oldmm)
+               return NULL;
+
+       mm = allocate_mm();
+       if (!mm)
+               goto fail_nomem;
+
+       memcpy(mm, oldmm, sizeof(*mm));
+
+       /* Initialize the swap token fields */
+       mm->token_priority = 0;
+       mm->last_interval = 0;
+
+       if (!mm_init(mm))
+               goto fail_nomem;
+
+       if (init_new_context(tsk, mm))
+               goto fail_nocontext;
+
+       err = dup_mmap(mm, oldmm);
+       if (err)
+               goto free_pt;
+
+       mm->hiwater_rss = get_mm_rss(mm);
+       mm->hiwater_vm = mm->total_vm;
+
+       return mm;
+
+free_pt:
+       mmput(mm);
+
+fail_nomem:
+       return NULL;
+
+fail_nocontext:
+       /*
+        * If init_new_context() failed, we cannot use mmput() to free the mm
+        * because it calls destroy_context()
+        */
+       mm_free_pgd(mm);
+       free_mm(mm);
+       return NULL;
+}
+
 static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 {
        struct mm_struct * mm, *oldmm;
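
The three error labels in dup_mm() encode a strict unwind order. A sketch
of why each failure takes a different exit, assuming the mm_init() of this
era (which frees the mm itself when pgd allocation fails):

    /*
     * allocate_mm() fails      -> fail_nomem:     nothing to undo
     * mm_init() fails          -> fail_nomem:     mm_init() freed mm itself
     * init_new_context() fails -> fail_nocontext: mmput() would run
     *                             destroy_context() on a context that was
     *                             never made, so free pgd and mm by hand
     * dup_mmap() fails         -> free_pt:        the mm is fully formed,
     *                             mmput() is the normal teardown path
     */
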
@@ -472,43 +541,21 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
        }
 
        retval = -ENOMEM;
-       mm = allocate_mm();
+       mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;
 
-       /* Copy the current MM stuff.. */
-       memcpy(mm, oldmm, sizeof(*mm));
-       if (!mm_init(mm))
-               goto fail_nomem;
-
-       if (init_new_context(tsk,mm))
-               goto fail_nocontext;
-
-       retval = dup_mmap(mm, oldmm);
-       if (retval)
-               goto free_pt;
-
-       mm->hiwater_rss = get_mm_rss(mm);
-       mm->hiwater_vm = mm->total_vm;
-
 good_mm:
+       /* Initialize the swap token fields */
+       mm->token_priority = 0;
+       mm->last_interval = 0;
+
        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;
 
-free_pt:
-       mmput(mm);
 fail_nomem:
        return retval;
-
-fail_nocontext:
-       /*
-        * If init_new_context() failed, we cannot use mmput() to free the mm
-        * because it calls destroy_context()
-        */
-       mm_free_pgd(mm);
-       free_mm(mm);
-       return retval;
 }
 
 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
@@ -574,19 +621,19 @@ static struct files_struct *alloc_files(void)
        struct files_struct *newf;
        struct fdtable *fdt;
 
-       newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+       newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;
 
        atomic_set(&newf->count, 1);
 
        spin_lock_init(&newf->file_lock);
+       newf->next_fd = 0;
        fdt = &newf->fdtab;
-       fdt->next_fd = 0;
        fdt->max_fds = NR_OPEN_DEFAULT;
-       fdt->max_fdset = __FD_SETSIZE;
-       fdt->close_on_exec = &newf->close_on_exec_init;
-       fdt->open_fds = &newf->open_fds_init;
+       fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
+       fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
+       fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->free_files = NULL;
@@ -596,32 +643,19 @@ out:
        return newf;
 }
 
-static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+/*
+ * Allocate a new files structure and copy contents from the
+ * passed in files structure.
+ * errorp will be valid only when the returned files_struct is NULL.
+ */
+static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 {
-       struct files_struct *oldf, *newf;
+       struct files_struct *newf;
        struct file **old_fds, **new_fds;
-       int open_files, size, i, error = 0, expand;
+       int open_files, size, i, expand;
        struct fdtable *old_fdt, *new_fdt;
 
-       /*
-        * A background process may not have any files ...
-        */
-       oldf = current->files;
-       if (!oldf)
-               goto out;
-
-       if (clone_flags & CLONE_FILES) {
-               atomic_inc(&oldf->count);
-               goto out;
-       }
-
-       /*
-        * Note: we may be using current for both targets (See exec.c)
-        * This works because we cache current->files (old) as oldf. Don't
-        * break this.
-        */
-       tsk->files = NULL;
-       error = -ENOMEM;
+       *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;
@@ -650,9 +684,9 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
        if (expand) {
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
-               error = expand_files(newf, open_files-1);
+               *errorp = expand_files(newf, open_files-1);
                spin_unlock(&newf->file_lock);
-               if (error < 0)
+               if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
@@ -701,17 +735,48 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }
 
-       tsk->files = newf;
-       error = 0;
 out:
-       return error;
+       return newf;
 
 out_release:
        free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
        free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
        free_fd_array(new_fdt->fd, new_fdt->max_fds);
        kmem_cache_free(files_cachep, newf);
-       goto out;
+       return NULL;
+}
+
+static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+{
+       struct files_struct *oldf, *newf;
+       int error = 0;
+
+       /*
+        * A background process may not have any files ...
+        */
+       oldf = current->files;
+       if (!oldf)
+               goto out;
+
+       if (clone_flags & CLONE_FILES) {
+               atomic_inc(&oldf->count);
+               goto out;
+       }
+
+       /*
+        * Note: we may be using current for both targets (See exec.c)
+        * This works because we cache current->files (old) as oldf. Don't
+        * break this.
+        */
+       tsk->files = NULL;
+       newf = dup_fd(oldf, &error);
+       if (!newf)
+               goto out;
+
+       tsk->files = newf;
+       error = 0;
+out:
+       return error;
 }
 
 /*
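
Factoring dup_fd() out of copy_files() lets sys_unshare(), added at the
bottom of this patch, reuse the same copying logic. The two call sites
reduce to:

    newf = dup_fd(oldf, &error);        /* copy_files(), fork path    */
    *new_fdp = dup_fd(fd, &error);      /* unshare_fd(), unshare path */
    /* in both cases a NULL return means *errorp holds the -errno */
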
@@ -725,8 +790,7 @@ int unshare_files(void)
        struct files_struct *files  = current->files;
        int rc;
 
-       if(!files)
-               BUG();
+       BUG_ON(!files);
 
        /* This can race but the race causes us to copy when we don't
           need to and drop the copy */
@@ -743,14 +807,6 @@ int unshare_files(void)
 
 EXPORT_SYMBOL(unshare_files);
 
-void sighand_free_cb(struct rcu_head *rhp)
-{
-       struct sighand_struct *sp;
-
-       sp = container_of(rhp, struct sighand_struct, rcu);
-       kmem_cache_free(sighand_cachep, sp);
-}
-
 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
 {
        struct sighand_struct *sig;
@@ -763,12 +819,17 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
-       spin_lock_init(&sig->siglock);
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
 }
 
+void __cleanup_sighand(struct sighand_struct *sighand)
+{
+       if (atomic_dec_and_test(&sighand->count))
+               kmem_cache_free(sighand_cachep, sighand);
+}
+
 static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
 {
        struct signal_struct *sig;
@@ -777,6 +838,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
+               taskstats_tgid_alloc(current);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -801,10 +863,10 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);
 
-       sig->it_real_value = sig->it_real_incr = 0;
+       hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
+       sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
-       sig->real_timer.data = (unsigned long) tsk;
-       init_timer(&sig->real_timer);
+       sig->tsk = tsk;
 
        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
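
The ITIMER_REAL timer moves from the jiffies-resolution timer wheel to
hrtimers, and the untyped .data cookie becomes the typed sig->tsk pointer.
A sketch of what this buys the expiry callback (not the exact it_real_fn()
body): since the timer is embedded in signal_struct, its context can be
recovered without casts:

    struct signal_struct *sig =
            container_of(timer, struct signal_struct, real_timer);

    send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
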
@@ -821,6 +883,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
+       taskstats_tgid_init(sig);
 
        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
@@ -834,10 +897,27 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
+       acct_init_pacct(&sig->pacct);
 
        return 0;
 }
 
+void __cleanup_signal(struct signal_struct *sig)
+{
+       exit_thread_group_keys(sig);
+       kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+       struct signal_struct *sig = tsk->signal;
+
+       atomic_dec(&sig->live);
+
+       if (atomic_dec_and_test(&sig->count))
+               __cleanup_signal(sig);
+}
+
 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
        unsigned long new_flags = p->flags;
@@ -856,6 +936,15 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
        return current->pid;
 }
 
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+       spin_lock_init(&p->pi_lock);
+       plist_head_init(&p->pi_waiters, &p->pi_lock);
+       p->pi_blocked_on = NULL;
+#endif
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -864,13 +953,13 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static task_t *copy_process(unsigned long clone_flags,
-                                unsigned long stack_start,
-                                struct pt_regs *regs,
-                                unsigned long stack_size,
-                                int __user *parent_tidptr,
-                                int __user *child_tidptr,
-                                int pid)
+static struct task_struct *copy_process(unsigned long clone_flags,
+                                       unsigned long stack_start,
+                                       struct pt_regs *regs,
+                                       unsigned long stack_size,
+                                       int __user *parent_tidptr,
+                                       int __user *child_tidptr,
+                                       int pid)
 {
        int retval;
        struct task_struct *p = NULL;
@@ -902,6 +991,12 @@ static task_t *copy_process(unsigned long clone_flags,
        if (!p)
                goto fork_out;
 
+       rt_mutex_init_task(p);
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+       DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
@@ -929,20 +1024,18 @@ static task_t *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_put_domain;
 
        p->did_exec = 0;
+       delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
        p->pid = pid;
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
-                       goto bad_fork_cleanup;
-
-       p->proc_dentry = NULL;
+                       goto bad_fork_cleanup_delays_binfmt;
 
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
-       spin_lock_init(&p->proc_lock);
 
        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);
@@ -977,6 +1070,31 @@ static task_t *copy_process(unsigned long clone_flags,
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cpuset;
        }
+       mpol_fix_fork_child_flag(p);
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+       p->irq_events = 0;
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       p->hardirqs_enabled = 1;
+#else
+       p->hardirqs_enabled = 0;
+#endif
+       p->hardirq_enable_ip = 0;
+       p->hardirq_enable_event = 0;
+       p->hardirq_disable_ip = _THIS_IP_;
+       p->hardirq_disable_event = 0;
+       p->softirqs_enabled = 1;
+       p->softirq_enable_ip = _THIS_IP_;
+       p->softirq_enable_event = 0;
+       p->softirq_disable_ip = 0;
+       p->softirq_disable_event = 0;
+       p->hardirq_context = 0;
+       p->softirq_context = 0;
+#endif
+#ifdef CONFIG_LOCKDEP
+       p->lockdep_depth = 0; /* no locks held yet */
+       p->curr_chain_key = 0;
+       p->lockdep_recursion = 0;
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -1006,17 +1124,29 @@ static task_t *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
-       if ((retval = copy_namespace(clone_flags, p)))
+       if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
-               goto bad_fork_cleanup_namespace;
+               goto bad_fork_cleanup_namespaces;
 
        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+       p->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+       p->compat_robust_list = NULL;
+#endif
+       INIT_LIST_HEAD(&p->pi_state_list);
+       p->pi_state_cache = NULL;
+
+       /*
+        * sigaltstack should be cleared when sharing the same VM
+        */
+       if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
+               p->sas_ss_sp = p->sas_ss_size = 0;
 
        /*
         * Syscall tracing should be turned off in the child regardless
@@ -1029,7 +1159,6 @@ static task_t *copy_process(unsigned long clone_flags,
 
        /* Our parent execution domain becomes current domain
           These must match for thread signalling to apply */
-          
        p->parent_exec_id = p->self_exec_id;
 
        /* ok, now we should be set up.. */
@@ -1042,6 +1171,7 @@ static task_t *copy_process(unsigned long clone_flags,
         * We dont wake it up yet.
         */
        p->group_leader = p;
+       INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);
 
@@ -1051,6 +1181,9 @@ static task_t *copy_process(unsigned long clone_flags,
        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);
 
+       /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
+       p->ioprio = current->ioprio;
+
        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
@@ -1065,16 +1198,6 @@ static task_t *copy_process(unsigned long clone_flags,
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());
 
-       /*
-        * Check for pending SIGKILL! The new thread should not be allowed
-        * to slip out of an OOM kill. (or normal SIGKILL.)
-        */
-       if (sigismember(&current->pending.signal, SIGKILL)) {
-               write_unlock_irq(&tasklist_lock);
-               retval = -EINTR;
-               goto bad_fork_cleanup_namespace;
-       }
-
        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
@@ -1082,30 +1205,27 @@ static task_t *copy_process(unsigned long clone_flags,
                p->real_parent = current;
        p->parent = p->real_parent;
 
+       spin_lock(&current->sighand->siglock);
+
+       /*
+        * Process group and session signals need to be delivered to just the
+        * parent before the fork, or to both the parent and the child after
+        * the fork. Restart if a signal comes in before we add the new
+        * process to its process group.
+        * A fatal signal pending means that current will exit, so the new
+        * thread can't slip out of an OOM kill (or normal SIGKILL).
+        */
+       recalc_sigpending();
+       if (signal_pending(current)) {
+               spin_unlock(&current->sighand->siglock);
+               write_unlock_irq(&tasklist_lock);
+               retval = -ERESTARTNOINTR;
+               goto bad_fork_cleanup_namespaces;
+       }
+
        if (clone_flags & CLONE_THREAD) {
-               spin_lock(&current->sighand->siglock);
-               /*
-                * Important: if an exit-all has been started then
-                * do not create this new thread - the whole thread
-                * group is supposed to exit anyway.
-                */
-               if (current->signal->flags & SIGNAL_GROUP_EXIT) {
-                       spin_unlock(&current->sighand->siglock);
-                       write_unlock_irq(&tasklist_lock);
-                       retval = -EAGAIN;
-                       goto bad_fork_cleanup_namespace;
-               }
                p->group_leader = current->group_leader;
-
-               if (current->signal->group_stop_count > 0) {
-                       /*
-                        * There is an all-stop in progress for the group.
-                        * We ourselves will stop as soon as we check signals.
-                        * Make the new thread part of that group stop too.
-                        */
-                       current->signal->group_stop_count++;
-                       set_tsk_thread_flag(p, TIF_SIGPENDING);
-               }
+               list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 
                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
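
This block closes a race that the deleted SIGKILL check only papered over:
a group-wide signal sent after the parent's group membership is sampled
but before the child is linked in would be seen by neither task in its
post-fork form. Holding siglock from the pending check through the list
insertion makes fork atomic with respect to signal delivery, and
-ERESTARTNOINTR never reaches userspace: the kernel simply re-executes the
whole fork once the signal has been dealt with. The window the old code
left open, as a timeline:

    /*
     *  parent                          signal sender
     *  ------                          -------------
     *  copy_process() in progress
     *                                  kill(-pgrp, SIGTERM)
     *                                    -> delivered to the parent only
     *  child linked into pgrp
     *  child runs, never sees SIGTERM
     */
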
@@ -1121,48 +1241,44 @@ static task_t *copy_process(unsigned long clone_flags,
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
-
-               spin_unlock(&current->sighand->siglock);
        }
 
-       /*
-        * inherit ioprio
-        */
-       p->ioprio = current->ioprio;
+       if (likely(p->pid)) {
+               add_parent(p);
+               if (unlikely(p->ptrace & PT_PTRACED))
+                       __ptrace_link(p, current->parent);
+
+               if (thread_group_leader(p)) {
+                       p->signal->tty = current->signal->tty;
+                       p->signal->pgrp = process_group(current);
+                       p->signal->session = current->signal->session;
+                       attach_pid(p, PIDTYPE_PGID, process_group(p));
+                       attach_pid(p, PIDTYPE_SID, p->signal->session);
 
-       SET_LINKS(p);
-       if (unlikely(p->ptrace & PT_PTRACED))
-               __ptrace_link(p, current->parent);
-
-       attach_pid(p, PIDTYPE_PID, p->pid);
-       attach_pid(p, PIDTYPE_TGID, p->tgid);
-       if (thread_group_leader(p)) {
-               p->signal->tty = current->signal->tty;
-               p->signal->pgrp = process_group(current);
-               p->signal->session = current->signal->session;
-               attach_pid(p, PIDTYPE_PGID, process_group(p));
-               attach_pid(p, PIDTYPE_SID, p->signal->session);
-               if (p->pid)
+                       list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
+               }
+               attach_pid(p, PIDTYPE_PID, p->pid);
+               nr_threads++;
        }
 
-       nr_threads++;
        total_forks++;
+       spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        return p;
 
-bad_fork_cleanup_namespace:
-       exit_namespace(p);
+bad_fork_cleanup_namespaces:
+       exit_task_namespaces(p);
 bad_fork_cleanup_keys:
        exit_keys(p);
 bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
 bad_fork_cleanup_signal:
-       exit_signal(p);
+       cleanup_signal(p);
 bad_fork_cleanup_sighand:
-       exit_sighand(p);
+       __cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
@@ -1179,7 +1295,8 @@ bad_fork_cleanup_policy:
 bad_fork_cleanup_cpuset:
 #endif
        cpuset_exit(p);
-bad_fork_cleanup:
+bad_fork_cleanup_delays_binfmt:
+       delayacct_tsk_free(p);
        if (p->binfmt)
                module_put(p->binfmt->module);
 bad_fork_cleanup_put_domain:
@@ -1200,16 +1317,15 @@ struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
        return regs;
 }
 
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
 {
-       task_t *task;
+       struct task_struct *task;
        struct pt_regs regs;
 
        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
-       if (!task)
-               return ERR_PTR(-ENOMEM);
-       init_idle(task, cpu);
-       unhash_process(task);
+       if (!IS_ERR(task))
+               init_idle(task, cpu);
+
        return task;
 }
 
@@ -1244,17 +1360,19 @@ long do_fork(unsigned long clone_flags,
 {
        struct task_struct *p;
        int trace = 0;
-       long pid = alloc_pidmap();
+       struct pid *pid = alloc_pid();
+       long nr;
 
-       if (pid < 0)
+       if (!pid)
                return -EAGAIN;
+       nr = pid->nr;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag (clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }
 
-       p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
+       p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
        /*
         * Do this prior waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
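
do_fork() now deals in refcounted struct pid objects rather than bare
pidmap integers; only the numeric id still crosses the user boundary. The
shape of the change, in sketch:

    struct pid *pid = alloc_pid();  /* was: long pid = alloc_pidmap(); */
    long nr = pid->nr;              /* the userspace-visible number    */
    /* ... copy_process(..., nr) ... */
    free_pid(pid);                  /* error path; was: free_pidmap()  */
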
@@ -1281,27 +1399,43 @@ long do_fork(unsigned long clone_flags,
                        p->state = TASK_STOPPED;
 
                if (unlikely (trace)) {
-                       current->ptrace_message = pid;
+                       current->ptrace_message = nr;
                        ptrace_notify ((trace << 8) | SIGTRAP);
                }
 
                if (clone_flags & CLONE_VFORK) {
                        wait_for_completion(&vfork);
-                       if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
+                       if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
+                               current->ptrace_message = nr;
                                ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
+                       }
                }
        } else {
-               free_pidmap(pid);
-               pid = PTR_ERR(p);
+               free_pid(pid);
+               nr = PTR_ERR(p);
        }
-       return pid;
+       return nr;
+}
+
+#ifndef ARCH_MIN_MMSTRUCT_ALIGN
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
+
+static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
+{
+       struct sighand_struct *sighand = data;
+
+       if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+                                       SLAB_CTOR_CONSTRUCTOR)
+               spin_lock_init(&sighand->siglock);
 }
 
 void __init proc_caches_init(void)
 {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
+                       sighand_ctor, NULL);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
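
Initializing siglock in a constructor only works in tandem with
SLAB_DESTROY_BY_RCU, and that pairing is the point: the slab page is
RCU-deferred, but an individual object may be recycled as a new
sighand_struct immediately, its lock remaining valid across the reuse
(hence the spin_lock_init() deleted from copy_sighand() above). A sketch
of the lock-free reader pattern this enables, roughly what
lock_task_sighand() does:

    struct sighand_struct *sig;

    rcu_read_lock();
    sig = rcu_dereference(tsk->sighand);
    if (sig) {
            spin_lock(&sig->siglock);       /* valid even if recycled */
            if (sig == tsk->sighand) {
                    /* still ours: safe to use */
            } else {
                    spin_unlock(&sig->siglock);     /* recycled: retry */
            }
    }
    rcu_read_unlock();
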
@@ -1315,6 +1449,312 @@ void __init proc_caches_init(void)
                        sizeof(struct vm_area_struct), 0,
                        SLAB_PANIC, NULL, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
-                       sizeof(struct mm_struct), 0,
+                       sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 }
+
+
+/*
+ * Check constraints on flags passed to the unshare system call and
+ * force unsharing of additional process context as appropriate.
+ */
+static inline void check_unshare_flags(unsigned long *flags_ptr)
+{
+       /*
+        * If unsharing a thread from a thread group, must also
+        * unshare vm.
+        */
+       if (*flags_ptr & CLONE_THREAD)
+               *flags_ptr |= CLONE_VM;
+
+       /*
+        * If unsharing vm, must also unshare signal handlers.
+        */
+       if (*flags_ptr & CLONE_VM)
+               *flags_ptr |= CLONE_SIGHAND;
+
+       /*
+        * If unsharing signal handlers from a task created with
+        * CLONE_THREAD, the thread group must be unshared as well.
+        */
+       if ((*flags_ptr & CLONE_SIGHAND) &&
+           (atomic_read(&current->signal->count) > 1))
+               *flags_ptr |= CLONE_THREAD;
+
+       /*
+        * If unsharing namespace, must also unshare filesystem information.
+        */
+       if (*flags_ptr & CLONE_NEWNS)
+               *flags_ptr |= CLONE_FS;
+}
+
+/*
+ * Unsharing of tasks created with CLONE_THREAD is not supported yet
+ */
+static int unshare_thread(unsigned long unshare_flags)
+{
+       if (unshare_flags & CLONE_THREAD)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Unshare the filesystem structure if it is being shared
+ */
+static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+{
+       struct fs_struct *fs = current->fs;
+
+       if ((unshare_flags & CLONE_FS) &&
+           (fs && atomic_read(&fs->count) > 1)) {
+               *new_fsp = __copy_fs_struct(current->fs);
+               if (!*new_fsp)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * Unshare the namespace structure if it is being shared
+ */
+static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
+{
+       struct namespace *ns = current->nsproxy->namespace;
+
+       if ((unshare_flags & CLONE_NEWNS) && ns) {
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               *new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
+               if (!*new_nsp)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
+ * supported yet
+ */
+static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
+{
+       struct sighand_struct *sigh = current->sighand;
+
+       if ((unshare_flags & CLONE_SIGHAND) &&
+           (sigh && atomic_read(&sigh->count) > 1))
+               return -EINVAL;
+       else
+               return 0;
+}
+
+/*
+ * Unshare vm if it is being shared
+ */
+static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
+{
+       struct mm_struct *mm = current->mm;
+
+       if ((unshare_flags & CLONE_VM) &&
+           (mm && atomic_read(&mm->mm_users) > 1)) {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Unshare file descriptor table if it is being shared
+ */
+static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
+{
+       struct files_struct *fd = current->files;
+       int error = 0;
+
+       if ((unshare_flags & CLONE_FILES) &&
+           (fd && atomic_read(&fd->count) > 1)) {
+               *new_fdp = dup_fd(fd, &error);
+               if (!*new_fdp)
+                       return error;
+       }
+
+       return 0;
+}
+
+/*
+ * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
+ * supported yet
+ */
+static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
+{
+       if (unshare_flags & CLONE_SYSVSEM)
+               return -EINVAL;
+
+       return 0;
+}
+
+#ifndef CONFIG_IPC_NS
+static inline int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns)
+{
+       if (flags & CLONE_NEWIPC)
+               return -EINVAL;
+
+       return 0;
+}
+#endif
+
+/*
+ * unshare allows a process to 'unshare' part of the process
+ * context which was originally shared using clone.  copy_*
+ * functions used by do_fork() cannot be used here directly
+ * because they modify an inactive task_struct that is being
+ * constructed. Here we are modifying the current, active,
+ * task_struct.
+ */
+asmlinkage long sys_unshare(unsigned long unshare_flags)
+{
+       int err = 0;
+       struct fs_struct *fs, *new_fs = NULL;
+       struct namespace *ns, *new_ns = NULL;
+       struct sighand_struct *sigh, *new_sigh = NULL;
+       struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
+       struct files_struct *fd, *new_fd = NULL;
+       struct sem_undo_list *new_ulist = NULL;
+       struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
+       struct uts_namespace *uts, *new_uts = NULL;
+       struct ipc_namespace *ipc, *new_ipc = NULL;
+
+       check_unshare_flags(&unshare_flags);
+
+       /* Return -EINVAL for all unsupported flags */
+       err = -EINVAL;
+       if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+                               CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
+                               CLONE_NEWUTS|CLONE_NEWIPC))
+               goto bad_unshare_out;
+
+       if ((err = unshare_thread(unshare_flags)))
+               goto bad_unshare_out;
+       if ((err = unshare_fs(unshare_flags, &new_fs)))
+               goto bad_unshare_cleanup_thread;
+       if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
+               goto bad_unshare_cleanup_fs;
+       if ((err = unshare_sighand(unshare_flags, &new_sigh)))
+               goto bad_unshare_cleanup_ns;
+       if ((err = unshare_vm(unshare_flags, &new_mm)))
+               goto bad_unshare_cleanup_sigh;
+       if ((err = unshare_fd(unshare_flags, &new_fd)))
+               goto bad_unshare_cleanup_vm;
+       if ((err = unshare_semundo(unshare_flags, &new_ulist)))
+               goto bad_unshare_cleanup_fd;
+       if ((err = unshare_utsname(unshare_flags, &new_uts)))
+               goto bad_unshare_cleanup_semundo;
+       if ((err = unshare_ipcs(unshare_flags, &new_ipc)))
+               goto bad_unshare_cleanup_uts;
+
+       if (new_ns || new_uts || new_ipc) {
+               old_nsproxy = current->nsproxy;
+               new_nsproxy = dup_namespaces(old_nsproxy);
+               if (!new_nsproxy) {
+                       err = -ENOMEM;
+                       goto bad_unshare_cleanup_ipc;
+               }
+       }
+
+       if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist ||
+                               new_uts || new_ipc) {
+
+               task_lock(current);
+
+               if (new_nsproxy) {
+                       current->nsproxy = new_nsproxy;
+                       new_nsproxy = old_nsproxy;
+               }
+
+               if (new_fs) {
+                       fs = current->fs;
+                       current->fs = new_fs;
+                       new_fs = fs;
+               }
+
+               if (new_ns) {
+                       ns = current->nsproxy->namespace;
+                       current->nsproxy->namespace = new_ns;
+                       new_ns = ns;
+               }
+
+               if (new_sigh) {
+                       sigh = current->sighand;
+                       rcu_assign_pointer(current->sighand, new_sigh);
+                       new_sigh = sigh;
+               }
+
+               if (new_mm) {
+                       mm = current->mm;
+                       active_mm = current->active_mm;
+                       current->mm = new_mm;
+                       current->active_mm = new_mm;
+                       activate_mm(active_mm, new_mm);
+                       new_mm = mm;
+               }
+
+               if (new_fd) {
+                       fd = current->files;
+                       current->files = new_fd;
+                       new_fd = fd;
+               }
+
+               if (new_uts) {
+                       uts = current->nsproxy->uts_ns;
+                       current->nsproxy->uts_ns = new_uts;
+                       new_uts = uts;
+               }
+
+               if (new_ipc) {
+                       ipc = current->nsproxy->ipc_ns;
+                       current->nsproxy->ipc_ns = new_ipc;
+                       new_ipc = ipc;
+               }
+
+               task_unlock(current);
+       }
+
+       if (new_nsproxy)
+               put_nsproxy(new_nsproxy);
+
+bad_unshare_cleanup_ipc:
+       if (new_ipc)
+               put_ipc_ns(new_ipc);
+
+bad_unshare_cleanup_uts:
+       if (new_uts)
+               put_uts_ns(new_uts);
+
+bad_unshare_cleanup_semundo:
+bad_unshare_cleanup_fd:
+       if (new_fd)
+               put_files_struct(new_fd);
+
+bad_unshare_cleanup_vm:
+       if (new_mm)
+               mmput(new_mm);
+
+bad_unshare_cleanup_sigh:
+       if (new_sigh)
+               if (atomic_dec_and_test(&new_sigh->count))
+                       kmem_cache_free(sighand_cachep, new_sigh);
+
+bad_unshare_cleanup_ns:
+       if (new_ns)
+               put_namespace(new_ns);
+
+bad_unshare_cleanup_fs:
+       if (new_fs)
+               put_fs_struct(new_fs);
+
+bad_unshare_cleanup_thread:
+bad_unshare_out:
+       return err;
+}
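
Note the control flow at the end of sys_unshare(): the success path
deliberately falls through the bad_unshare_* labels. Each swap leaves the
old structure behind in the corresponding new_* variable, so a single
chain of cleanups frees either the unused new copies (on error) or the
displaced originals (on success). In miniature:

    if (new_fs) {                           /* swap under task_lock() */
            fs = current->fs;
            current->fs = new_fs;
            new_fs = fs;                    /* new_fs now holds the OLD fs */
    }
    /* ... success falls through ... */
    bad_unshare_cleanup_fs:
            if (new_fs)
                    put_fs_struct(new_fs);  /* old fs on success,
                                               unused copy on error */
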