Merge branches 'release' and 'throttling-domains' into release
diff --git a/kernel/fork.c b/kernel/fork.c
index a794bfc..3995297 100644
@@ -51,6 +51,7 @@
 #include <linux/random.h>
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
+#include <linux/blkdev.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -117,7 +118,7 @@ EXPORT_SYMBOL(free_task);
 
 void __put_task_struct(struct task_struct *tsk)
 {
-       WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
+       WARN_ON(!tsk->exit_state);
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);
 
@@ -206,7 +207,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 }
 
 #ifdef CONFIG_MMU
-static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
@@ -324,7 +325,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
 
 static inline void mm_free_pgd(struct mm_struct * mm)
 {
-       pgd_free(mm->pgd);
+       pgd_free(mm, mm->pgd);
 }
 #else
 #define dup_mmap(mm, oldmm)    (0)
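
pgd_free() grew a struct mm_struct argument across every architecture this
cycle, so callers now pass the owning mm along with the pgd. The new
per-architecture prototype in <asm/pgalloc.h>, in sketch form:

	void pgd_free(struct mm_struct *mm, pgd_t *pgd);
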
@@ -392,6 +393,7 @@ void fastcall __mmdrop(struct mm_struct *mm)
        destroy_context(mm);
        free_mm(mm);
 }
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 /*
  * Decrement the use count and release all resources for an mm.
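
Exporting __mmdrop lets GPL modules use mmdrop(), which only falls through to
it when the final mm_count reference is dropped. A sketch of the inline
wrapper, roughly as it stood in <linux/sched.h> at the time:

	static inline void mmdrop(struct mm_struct *mm)
	{
		if (unlikely(atomic_dec_and_test(&mm->mm_count)))
			__mmdrop(mm);
	}
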
@@ -584,7 +586,7 @@ fail_nomem:
        return retval;
 }
 
-static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
+static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
 {
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
@@ -616,7 +618,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
 
 EXPORT_SYMBOL_GPL(copy_fs_struct);
 
-static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 {
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
@@ -791,6 +793,31 @@ out:
        return error;
 }
 
+static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+#ifdef CONFIG_BLOCK
+       struct io_context *ioc = current->io_context;
+
+       if (!ioc)
+               return 0;
+       /*
+        * Share io context with parent, if CLONE_IO is set
+        */
+       if (clone_flags & CLONE_IO) {
+               tsk->io_context = ioc_task_link(ioc);
+               if (unlikely(!tsk->io_context))
+                       return -ENOMEM;
+       } else if (ioprio_valid(ioc->ioprio)) {
+               tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
+               if (unlikely(!tsk->io_context))
+                       return -ENOMEM;
+
+               tsk->io_context->ioprio = ioc->ioprio;
+       }
+#endif
+       return 0;
+}
+
 /*
  *     Helper to unshare the files of the current task.
  *     We don't want to expose copy_files internals to
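
copy_io() implements the new CLONE_IO semantics: with the flag set, the child
takes a counted reference on the parent's io_context, so the I/O scheduler
sees both tasks as a single I/O entity; without it, only a non-default I/O
priority is copied into a freshly allocated context. A minimal userspace
sketch of a CLONE_IO clone, assuming CLONE_IO is 0x80000000 as merged in this
same release:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/wait.h>

	#ifndef CLONE_IO
	#define CLONE_IO 0x80000000	/* assumed value */
	#endif

	static int child_fn(void *arg)
	{
		/* I/O issued here is charged to the shared io_context */
		return 0;
	}

	int main(void)
	{
		const size_t sz = 64 * 1024;
		char *stack = malloc(sz);

		if (!stack)
			return 1;
		/* stack grows down on x86, hence stack + sz */
		if (clone(child_fn, stack + sz, CLONE_IO | SIGCHLD, NULL) < 0) {
			perror("clone");
			return 1;
		}
		wait(NULL);
		return 0;
	}
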
@@ -819,7 +846,7 @@ int unshare_files(void)
 
 EXPORT_SYMBOL(unshare_files);
 
-static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 {
        struct sighand_struct *sig;
 
@@ -842,7 +869,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
                kmem_cache_free(sighand_cachep, sighand);
 }
 
-static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
        struct signal_struct *sig;
        int ret;
@@ -924,7 +951,7 @@ void __cleanup_signal(struct signal_struct *sig)
        kmem_cache_free(signal_cachep, sig);
 }
 
-static inline void cleanup_signal(struct task_struct *tsk)
+static void cleanup_signal(struct task_struct *tsk)
 {
        struct signal_struct *sig = tsk->signal;
 
@@ -934,7 +961,7 @@ static inline void cleanup_signal(struct task_struct *tsk)
                __cleanup_signal(sig);
 }
 
-static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
+static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
        unsigned long new_flags = p->flags;
 
@@ -953,7 +980,7 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
        return task_pid_vnr(current);
 }
 
-static inline void rt_mutex_init_task(struct task_struct *p)
+static void rt_mutex_init_task(struct task_struct *p)
 {
        spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
@@ -978,7 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                                        struct pid *pid)
 {
        int retval;
-       struct task_struct *p = NULL;
+       struct task_struct *p;
        int cgroup_callbacks_done = 0;
 
        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
@@ -1045,6 +1072,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        copy_flags(clone_flags, p);
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
+#ifdef CONFIG_PREEMPT_RCU
+       p->rcu_read_lock_nesting = 0;
+       p->rcu_flipctr_idx = 0;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
 
@@ -1056,6 +1087,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->gtime = cputime_zero;
        p->utimescaled = cputime_zero;
        p->stimescaled = cputime_zero;
+       p->prev_utime = cputime_zero;
+       p->prev_stime = cputime_zero;
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+       p->last_switch_count = 0;
+       p->last_switch_timestamp = 0;
+#endif
 
 #ifdef CONFIG_TASK_XACCT
        p->rchar = 0;           /* I/O counter: bytes read */
@@ -1080,6 +1118,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_SECURITY
        p->security = NULL;
 #endif
+       p->cap_bset = current->cap_bset;
        p->io_context = NULL;
        p->audit_context = NULL;
        cgroup_fork(p);
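
cap_bset is the new per-process capability bounding set, inherited unchanged
across fork. Userspace drops capabilities from it via prctl(); a hedged
sketch, assuming the PR_CAPBSET_DROP (24) and CAP_NET_RAW (13) values from
the capability series merged alongside this:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_CAPBSET_DROP
	#define PR_CAPBSET_DROP	24	/* assumed value */
	#endif
	#define CAP_NET_RAW	13	/* assumed value */

	int main(void)
	{
		if (prctl(PR_CAPBSET_DROP, CAP_NET_RAW, 0, 0, 0))
			perror("prctl(PR_CAPBSET_DROP)");
		/* children forked from here on inherit a bounding set
		 * with CAP_NET_RAW cleared */
		return 0;
	}
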
@@ -1121,6 +1160,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->blocked_on = NULL; /* not blocked yet */
 #endif
 
+       /* Perform scheduler related setup. Assign this task to a CPU. */
+       sched_fork(p, clone_flags);
+
        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
@@ -1142,15 +1184,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
+       if ((retval = copy_io(clone_flags, p)))
+               goto bad_fork_cleanup_namespaces;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
-               goto bad_fork_cleanup_namespaces;
+               goto bad_fork_cleanup_io;
 
        if (pid != &init_struct_pid) {
                retval = -ENOMEM;
                pid = alloc_pid(task_active_pid_ns(p));
                if (!pid)
-                       goto bad_fork_cleanup_namespaces;
+                       goto bad_fork_cleanup_io;
 
                if (clone_flags & CLONE_NEWPID) {
                        retval = pid_ns_prepare_proc(task_active_pid_ns(p));
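
copy_io() slots in after copy_namespaces() and before copy_thread(), so every
failure past it must now unwind through the new bad_fork_cleanup_io label.
This is the kernel's usual reverse-order goto unwind; a stand-alone
illustration of the pattern (setup_a/teardown_a and friends are hypothetical
names):

	int setup_all(struct obj *o)
	{
		int err;

		err = setup_a(o);	/* acquired first ... */
		if (err)
			goto out;
		err = setup_b(o);
		if (err)
			goto undo_a;	/* ... so released last */
		err = setup_c(o);
		if (err)
			goto undo_b;
		return 0;

	undo_b:
		teardown_b(o);
	undo_a:
		teardown_a(o);
	out:
		return err;
	}
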
@@ -1191,6 +1235,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
 #endif
+       clear_all_latency_tracing(p);
 
        /* Our parent execution domain becomes current domain
           These must match for thread signalling to apply */
@@ -1210,9 +1255,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);
 
-       /* Perform scheduler related setup. Assign this task to a CPU. */
-       sched_fork(p, clone_flags);
-
        /* Now that the task is set up, run cgroup callbacks if
         * necessary. We need to run them before the task is visible
         * on the tasklist. */
@@ -1222,9 +1264,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);
 
-       /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
-       p->ioprio = current->ioprio;
-
        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
@@ -1235,6 +1274,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         * parent's CPU). This avoids alot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
+       p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());
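
The new RT scheduling class caches the weight of cpus_allowed in
p->rt.nr_cpus_allowed so its push/pull logic can test migratability without
recounting mask bits; the child must therefore inherit a count consistent
with the mask copied just above. The invariant, written as a hypothetical
helper:

	static inline void sync_rt_cpus_allowed(struct task_struct *p)
	{
		/* cached weight must always match the affinity mask */
		p->rt.nr_cpus_allowed = cpus_weight(p->cpus_allowed);
	}
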
@@ -1290,23 +1330,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        __ptrace_link(p, current->parent);
 
                if (thread_group_leader(p)) {
-                       if (clone_flags & CLONE_NEWPID) {
+                       if (clone_flags & CLONE_NEWPID)
                                p->nsproxy->pid_ns->child_reaper = p;
-                               p->signal->tty = NULL;
-                               p->signal->pgrp = p->pid;
-                               set_task_session(p, p->pid);
-                               attach_pid(p, PIDTYPE_PGID, pid);
-                               attach_pid(p, PIDTYPE_SID, pid);
-                       } else {
-                               p->signal->tty = current->signal->tty;
-                               p->signal->pgrp = task_pgrp_nr(current);
-                               set_task_session(p, task_session_nr(current));
-                               attach_pid(p, PIDTYPE_PGID,
-                                               task_pgrp(current));
-                               attach_pid(p, PIDTYPE_SID,
-                                               task_session(current));
-                       }
 
+                       p->signal->tty = current->signal->tty;
+                       set_task_pgrp(p, task_pgrp_nr(current));
+                       set_task_session(p, task_session_nr(current));
+                       attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+                       attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
@@ -1324,6 +1355,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_free_pid:
        if (pid != &init_struct_pid)
                free_pid(pid);
+bad_fork_cleanup_io:
+       put_io_context(p->io_context);
 bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
 bad_fork_cleanup_keys:
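
put_io_context() is NULL-safe, so the new label runs correctly whether or not
copy_io() ever attached a context. For reference, the sharing side it undoes:
ioc_task_link() in <linux/blkdev.h> looked roughly like this at the time
(field names per the io-context sharing work; treat the details as
assumptions):

	static inline struct io_context *ioc_task_link(struct io_context *ioc)
	{
		/* refuse to share a context whose last reference is dying */
		if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
			atomic_inc(&ioc->nr_tasks);
			return ioc;
		}
		return NULL;
	}
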
@@ -1366,7 +1399,7 @@ fork_out:
        return ERR_PTR(retval);
 }
 
-noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 {
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
@@ -1385,7 +1418,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
        return task;
 }
 
-static inline int fork_traceflag (unsigned clone_flags)
+static int fork_traceflag(unsigned clone_flags)
 {
        if (clone_flags & CLONE_UNTRACED)
                return 0;
@@ -1418,6 +1451,23 @@ long do_fork(unsigned long clone_flags,
        int trace = 0;
        long nr;
 
+       /*
+        * We hope to recycle these flags after 2.6.26
+        */
+       if (unlikely(clone_flags & CLONE_STOPPED)) {
+               static int __read_mostly count = 100;
+
+               if (count > 0 && printk_ratelimit()) {
+                       char comm[TASK_COMM_LEN];
+
+                       count--;
+                       printk(KERN_INFO "fork(): process `%s' used deprecated "
+                                       "clone flags 0x%lx\n",
+                               get_task_comm(comm, current),
+                               clone_flags & CLONE_STOPPED);
+               }
+       }
+
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag (clone_flags);
                if (trace)
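
The new check emits at most 100 rate-limited warnings so CLONE_STOPPED can be
reclaimed after 2.6.26. Assuming CLONE_STOPPED is 0x02000000, a hypothetical
process named `myapp' passing the flag would log roughly:

	fork(): process `myapp' used deprecated clone flags 0x2000000
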
@@ -1460,7 +1510,7 @@ long do_fork(unsigned long clone_flags,
                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
-                       p->state = TASK_STOPPED;
+                       __set_task_state(p, TASK_STOPPED);
 
                if (unlikely (trace)) {
                        current->ptrace_message = nr;
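
The direct assignment to p->state becomes __set_task_state(), the variant
without a memory barrier; that is sufficient here because the child has never
run and cannot yet observe its own state. The two macros, roughly as defined
in <linux/sched.h> of this era:

	#define __set_task_state(tsk, state_value)		\
		do { (tsk)->state = (state_value); } while (0)
	#define set_task_state(tsk, state_value)		\
		set_mb((tsk)->state, (state_value))
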
@@ -1521,7 +1571,7 @@ void __init proc_caches_init(void)
  * Check constraints on flags passed to the unshare system call and
  * force unsharing of additional process context as appropriate.
  */
-static inline void check_unshare_flags(unsigned long *flags_ptr)
+static void check_unshare_flags(unsigned long *flags_ptr)
 {
        /*
         * If unsharing a thread from a thread group, must also