Merge branch 'nfs-for-2.6.32' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6...
diff --git a/kernel/fork.c b/kernel/fork.c
index 5449efb..021e113 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/completion.h>
-#include <linux/mnt_namespace.h>
 #include <linux/personality.h>
 #include <linux/mempolicy.h>
 #include <linux/sem.h>
@@ -62,6 +61,7 @@
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
+#include <linux/perf_counter.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -177,7 +177,7 @@ void __init fork_init(unsigned long mempages)
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
-                       ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+                       ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
        /* do the arch specific task caches init */
@@ -426,6 +426,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
+       mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
        mm->core_state = NULL;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
@@ -567,18 +568,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
         * the value intact in a core dump, and to save the unnecessary
         * trouble otherwise.  Userland only wants this done for a sys_exit.
         */
-       if (tsk->clear_child_tid
-           && !(tsk->flags & PF_SIGNALED)
-           && atomic_read(&mm->mm_users) > 1) {
-               u32 __user * tidptr = tsk->clear_child_tid;
+       if (tsk->clear_child_tid) {
+               if (!(tsk->flags & PF_SIGNALED) &&
+                   atomic_read(&mm->mm_users) > 1) {
+                       /*
+                        * We don't check the error code - if userspace has
+                        * not set up a proper pointer then tough luck.
+                        */
+                       put_user(0, tsk->clear_child_tid);
+                       sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
+                                       1, NULL, NULL, 0);
+               }
                tsk->clear_child_tid = NULL;
-
-               /*
-                * We don't check the error code - if userspace has
-                * not set up a proper pointer then tough luck.
-                */
-               put_user(0, tidptr);
-               sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
 }
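
The rewritten mm_release() above is the kernel half of the CLONE_CHILD_CLEARTID contract: when a thread exits, the kernel stores 0 into the word the child registered (via clone() or set_tid_address()) and issues a FUTEX_WAKE on it, which is what lets a joiner sleep until the thread is really gone. The sketch below shows the userspace half using the documented clone(2)/futex(2) interface; it is an illustration, not code from this patch, and the stack size, names, and flag mix are my own choices. Note that CLONE_VM keeps mm_users above 1, so the wake path in the hunk above is taken.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/futex.h>

static pid_t child_tid;                 /* the word mm_release() clears     */
static char child_stack[64 * 1024] __attribute__((aligned(16)));

static int child_fn(void *arg)
{
        /* On exit, the kernel zeroes child_tid and FUTEX_WAKEs waiters. */
        return 0;
}

int main(void)
{
        pid_t tid, v;

        /* CHILD_SETTID fills in the tid, CHILD_CLEARTID clears it at exit. */
        tid = clone(child_fn, child_stack + sizeof(child_stack),
                    CLONE_VM | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD,
                    NULL, NULL, NULL, &child_tid);
        if (tid == -1) {
                perror("clone");
                return 1;
        }

        /* "Join": sleep on the futex until the kernel has cleared the word. */
        while ((v = __atomic_load_n(&child_tid, __ATOMIC_SEQ_CST)) != 0)
                syscall(SYS_futex, &child_tid, FUTEX_WAIT, v, NULL, NULL, 0);

        printf("child %d has fully exited\n", tid);
        waitpid(tid, NULL, 0);          /* reap the zombie                  */
        return 0;
}
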
 
@@ -981,6 +982,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (!p)
                goto fork_out;
 
+       ftrace_graph_init_task(p);
+
        rt_mutex_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -1026,7 +1029,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
 
-       clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);
 
        p->utime = cputime_zero;
@@ -1094,6 +1096,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);
 
+       retval = perf_counter_init_task(p);
+       if (retval)
+               goto bad_fork_cleanup_policy;
+
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_policy;
        /* copy all the process information */
@@ -1130,8 +1136,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                }
        }
 
-       ftrace_graph_init_task(p);
-
        p->pid = pid_nr(pid);
        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
@@ -1140,7 +1144,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (current->nsproxy != p->nsproxy) {
                retval = ns_cgroup_clone(p, pid);
                if (retval)
-                       goto bad_fork_free_graph;
+                       goto bad_fork_free_pid;
        }
 
        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1232,7 +1236,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
-               goto bad_fork_free_graph;
+               goto bad_fork_free_pid;
        }
 
        if (clone_flags & CLONE_THREAD) {
@@ -1265,10 +1269,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        cgroup_post_fork(p);
+       perf_counter_fork(p);
        return p;
 
-bad_fork_free_graph:
-       ftrace_graph_exit_task(p);
 bad_fork_free_pid:
        if (pid != &init_struct_pid)
                free_pid(pid);
@@ -1292,6 +1295,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
        audit_free(p);
 bad_fork_cleanup_policy:
+       perf_counter_free_task(p);
 #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
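
The label churn in the error-path hunks above follows the kernel's goto-unwind idiom: each failure jumps to a label that tears down, in reverse order, everything acquired so far, which is why moving ftrace_graph_init_task() earlier and introducing the perf_counter calls also reshuffles which label each failure jumps to. A minimal, self-contained sketch of the pattern is below; create_thing(), setup_a() and friends are hypothetical names for illustration, not kernel APIs.

#include <stdio.h>
#include <errno.h>

struct thing { int a, b, c; };

static int  setup_a(struct thing *t)    { t->a = 1; return 0; }
static void teardown_a(struct thing *t) { t->a = 0; }
static int  setup_b(struct thing *t)    { t->b = 1; return 0; }
static void teardown_b(struct thing *t) { t->b = 0; }
static int  setup_c(struct thing *t)    { (void)t; return -ENOMEM; } /* force a failure */

static int create_thing(struct thing *t)
{
        int err;

        err = setup_a(t);               /* acquired first ...               */
        if (err)
                goto out;
        err = setup_b(t);
        if (err)
                goto out_teardown_a;
        err = setup_c(t);
        if (err)
                goto out_teardown_b;
        return 0;

out_teardown_b:                         /* ... torn down in reverse order   */
        teardown_b(t);
out_teardown_a:
        teardown_a(t);
out:
        return err;
}

int main(void)
{
        struct thing t = { 0, 0, 0 };

        printf("create_thing() = %d, a=%d b=%d\n", create_thing(&t), t.a, t.b);
        return 0;
}
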
@@ -1460,20 +1464,20 @@ void __init proc_caches_init(void)
 {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-                       sighand_ctor);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+                       SLAB_NOTRACK, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
        mmap_init();
 }
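
The SLAB_NOTRACK flag added throughout proc_caches_init() (and to task_struct_cachep in fork_init() earlier in this diff) tells kmemcheck not to track allocations from these caches, which are created at boot and far too hot to be worth byte-tracking. The call pattern is ordinary kmem_cache_create(); the module sketch below shows the same flag combination on a hypothetical cache of my own (struct foo and "foo_cache" are illustrative, not from this patch), against the 2.6.31-era API where SLAB_NOTRACK exists.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical object type, purely for illustration. */
struct foo {
        int  id;
        char name[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
        /*
         * SLAB_NOTRACK asks kmemcheck to skip this cache;
         * SLAB_HWCACHE_ALIGN keeps objects cacheline aligned.
         * No constructor is needed, so the last argument is NULL.
         */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                       SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
        if (!foo_cachep)
                return -ENOMEM;
        return 0;
}

static void __exit foo_cache_exit(void)
{
        kmem_cache_destroy(foo_cachep);
}

module_init(foo_cache_init);
module_exit(foo_cache_exit);
MODULE_LICENSE("GPL");
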