tracing/filters: Don't use pred on alloc failure
diff --git a/kernel/fork.c b/kernel/fork.c
index c4b1e35..9b42695 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/completion.h>
-#include <linux/mnt_namespace.h>
 #include <linux/personality.h>
 #include <linux/mempolicy.h>
 #include <linux/sem.h>
@@ -61,8 +60,8 @@
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
-#include <trace/sched.h>
 #include <linux/magic.h>
+#include <linux/perf_counter.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -71,6 +70,8 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#include <trace/events/sched.h>
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -83,8 +84,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
-DEFINE_TRACE(sched_process_fork);
-
 int nr_processes(void)
 {
        int cpu;
@@ -178,7 +177,7 @@ void __init fork_init(unsigned long mempages)
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
-                       ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+                       ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
        /* do the arch specific task caches init */
@@ -1029,7 +1028,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
 
-       clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);
 
        p->utime = cputime_zero;
@@ -1091,12 +1089,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
 #endif
-       if (unlikely(current->ptrace))
-               ptrace_fork(p, clone_flags);
+
+       p->bts = NULL;
 
        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);
 
+       retval = perf_counter_init_task(p);
+       if (retval)
+               goto bad_fork_cleanup_policy;
+
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_policy;
        /* copy all the process information */
@@ -1291,6 +1293,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
        audit_free(p);
 bad_fork_cleanup_policy:
+       perf_counter_free_task(p);
 #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
@@ -1406,8 +1409,11 @@ long do_fork(unsigned long clone_flags,
                        init_completion(&vfork);
                }
 
+               if (!(clone_flags & CLONE_THREAD))
+                       perf_counter_fork(p);
+
                audit_finish_fork(p);
-               tracehook_report_clone(trace, regs, clone_flags, nr, p);
+               tracehook_report_clone(regs, clone_flags, nr, p);
 
                /*
                 * We set PF_STARTING at creation in case tracing wants to
@@ -1459,20 +1465,20 @@ void __init proc_caches_init(void)
 {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-                       sighand_ctor);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+                       SLAB_NOTRACK, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
        mmap_init();
 }
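
The perf_counter_init_task()/perf_counter_free_task() pairing above depends on
the kernel's goto-unwind convention: a failed init jumps to the same cleanup
label that every later failure (audit_alloc() and beyond) falls through, so the
free routine runs even when its own init never completed. Below is a minimal
userspace sketch of that pattern, with hypothetical names and plain
malloc()/free() standing in for the kernel calls; it is not kernel code.

/*
 * Sketch of the goto-based unwind used in copy_process() above: a
 * failed init step jumps to the cleanup label that also sits on the
 * error path of every later step, so the matching free routine must
 * tolerate a partially (or never) initialized object. Here free(NULL)
 * is a no-op, mirroring the guarantee perf_counter_free_task() needs
 * to provide for this label layout to work.
 */
#include <errno.h>
#include <stdlib.h>

/* Each init step either fully succeeds or leaves its pointer NULL. */
static int init_counters(void **c) { *c = malloc(16); return *c ? 0 : -ENOMEM; }
static int init_audit(void **a)    { *a = malloc(16); return *a ? 0 : -ENOMEM; }

static int create_task(void)
{
	void *counters = NULL, *audit = NULL;
	int retval;

	retval = init_counters(&counters);
	if (retval)
		goto cleanup_counters;	/* like goto bad_fork_cleanup_policy */

	retval = init_audit(&audit);
	if (retval)
		goto cleanup_counters;	/* later failures unwind through the same label */

	free(audit);			/* demo only: release everything on success too */
	free(counters);
	return 0;

cleanup_counters:
	free(counters);			/* safe even if init_counters() itself failed */
	return retval;
}

int main(void)
{
	return create_task() ? EXIT_FAILURE : EXIT_SUCCESS;
}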