X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Ffork.c;h=9c042f901570e1b789d40fdbda111739c733cdb4;hb=eadd3c4b9a90e31d5b6034a8813bfabecbe48681;hp=fd22245e388102dbc085257810e1f62801016221;hpb=f95d47caae5302a63d92be9a0292abc90e2a14e1;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/fork.c b/kernel/fork.c
index fd22245..9c042f9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -14,11 +14,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -30,24 +29,30 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
+#include

 #include
 #include
@@ -82,30 +87,31 @@ int nr_processes(void)
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 # define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
 # define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
-static kmem_cache_t *task_struct_cachep;
+static struct kmem_cache *task_struct_cachep;
 #endif

 /* SLAB cache for signal_struct structures (tsk->signal) */
-static kmem_cache_t *signal_cachep;
+static struct kmem_cache *signal_cachep;

 /* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
+struct kmem_cache *sighand_cachep;

 /* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
+struct kmem_cache *files_cachep;

 /* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
+struct kmem_cache *fs_cachep;

 /* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
+struct kmem_cache *vm_area_cachep;

 /* SLAB cache for mm_struct structures (tsk->mm) */
-static kmem_cache_t *mm_cachep;
+static struct kmem_cache *mm_cachep;

 void free_task(struct task_struct *tsk)
 {
-	free_thread_info(tsk->thread_info);
+	prop_local_destroy_single(&tsk->dirties);
+	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	free_task_struct(tsk);
 }
@@ -113,7 +119,7 @@ EXPORT_SYMBOL(free_task);

 void __put_task_struct(struct task_struct *tsk)
 {
-	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
+	WARN_ON(!tsk->exit_state);
 	WARN_ON(atomic_read(&tsk->usage));
 	WARN_ON(tsk == current);
@@ -135,7 +141,7 @@ void __init fork_init(unsigned long mempages)
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
 		kmem_cache_create("task_struct", sizeof(struct task_struct),
-			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
+			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
 #endif

 	/*
@@ -161,6 +167,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	int err;

 	prepare_to_copy(orig);

@@ -175,7 +182,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	}

 	*tsk = *orig;
-	tsk->thread_info = ti;
+	tsk->stack = ti;
+
+	err = prop_local_init_single(&tsk->dirties);
+	if (err) {
+		free_thread_info(ti);
+		free_task_struct(tsk);
+		return NULL;
+	}
+
 	setup_thread_stack(tsk, orig);

 #ifdef CONFIG_CC_STACKPROTECTOR
@@ -193,7 +208,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 }

 #ifdef CONFIG_MMU
-static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
 	struct vm_area_struct *mpnt, *tmp, **pprev;
 	struct rb_node **rb_link, *rb_parent;
@@ -202,7 +217,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	struct mempolicy *pol;

 	down_write(&oldmm->mmap_sem);
-	flush_cache_mm(oldmm);
+	flush_cache_dup_mm(oldmm);
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */
@@ -237,7 +252,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 				goto fail_nomem;
 			charge = len;
 		}
-		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
@@ -252,11 +267,11 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		anon_vma_link(tmp);
 		file = tmp->vm_file;
 		if (file) {
-			struct inode *inode = file->f_dentry->d_inode;
+			struct inode *inode = file->f_path.dentry->d_inode;
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-
+
 			/* insert tmp into the share list, just after mpnt */
 			spin_lock(&file->f_mapping->i_mmap_lock);
 			tmp->vm_truncate_count = mpnt->vm_truncate_count;
@@ -285,6 +300,8 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (retval)
 			goto out;
 	}
+	/* a new mm has just been created */
+	arch_dup_mmap(oldmm, mm);
 	retval = 0;
 out:
 	up_write(&mm->mmap_sem);
@@ -309,7 +326,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)

 static inline void mm_free_pgd(struct mm_struct * mm)
 {
-	pgd_free(mm->pgd);
+	pgd_free(mm, mm->pgd);
 }
 #else
 #define dup_mmap(mm, oldmm)	(0)
@@ -317,19 +334,21 @@ static inline void mm_free_pgd(struct mm_struct * mm)
 #define mm_free_pgd(mm)
 #endif /* CONFIG_MMU */

- __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

-#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

 #include

-static struct mm_struct * mm_init(struct mm_struct * mm)
+static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 {
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
+	mm->flags = (current->mm) ? current->mm->flags
+				  : MMF_DUMP_FILTER_DEFAULT;
 	mm->core_waiters = 0;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
@@ -339,11 +358,14 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
 	mm->ioctx_list = NULL;
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->cached_hole_size = ~0UL;
+	mm_init_cgroup(mm, p);

 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
 		return mm;
 	}
+
+	mm_free_cgroup(mm);
 	free_mm(mm);
 	return NULL;
 }
@@ -358,7 +380,7 @@ struct mm_struct * mm_alloc(void)
 	mm = allocate_mm();
 	if (mm) {
 		memset(mm, 0, sizeof(*mm));
-		mm = mm_init(mm);
+		mm = mm_init(mm, current);
 	}
 	return mm;
 }
@@ -368,13 +390,14 @@ struct mm_struct * mm_alloc(void)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-void fastcall __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	free_mm(mm);
 }
+EXPORT_SYMBOL_GPL(__mmdrop);

 /*
  * Decrement the use count and release all resources for an mm.
@@ -392,6 +415,7 @@ void mmput(struct mm_struct *mm)
 			spin_unlock(&mmlist_lock);
 		}
 		put_swap_token(mm);
+		mm_free_cgroup(mm);
 		mmdrop(mm);
 	}
 }
@@ -448,7 +472,16 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 		tsk->vfork_done = NULL;
 		complete(vfork_done);
 	}
-	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
+
+	/*
+	 * If we're exiting normally, clear a user-space tid field if
+	 * requested.  We leave this alone when dying by signal, to leave
+	 * the value intact in a core dump, and to save the unnecessary
+	 * trouble otherwise.  Userland only wants this done for a sys_exit.
+	 */
+	if (tsk->clear_child_tid
+	    && !(tsk->flags & PF_SIGNALED)
+	    && atomic_read(&mm->mm_users) > 1) {
 		u32 __user * tidptr = tsk->clear_child_tid;
 		tsk->clear_child_tid = NULL;

@@ -479,7 +512,11 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)

 	memcpy(mm, oldmm, sizeof(*mm));

-	if (!mm_init(mm))
+	/* Initializing for Swap token stuff */
+	mm->token_priority = 0;
+	mm->last_interval = 0;
+
+	if (!mm_init(mm, tsk))
 		goto fail_nomem;

 	if (init_new_context(tsk, mm))
@@ -542,6 +579,10 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 		goto fail_nomem;

 good_mm:
+	/* Initializing for Swap token stuff */
+	mm->token_priority = 0;
+	mm->last_interval = 0;
+
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 	return 0;
@@ -550,7 +591,7 @@ fail_nomem:
 	return retval;
 }

-static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
+static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
 {
 	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
 	/* We don't need to lock fs - think why ;-) */
@@ -559,16 +600,16 @@ static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
 		rwlock_init(&fs->lock);
 		fs->umask = old->umask;
 		read_lock(&old->lock);
-		fs->rootmnt = mntget(old->rootmnt);
-		fs->root = dget(old->root);
-		fs->pwdmnt = mntget(old->pwdmnt);
-		fs->pwd = dget(old->pwd);
-		if (old->altroot) {
-			fs->altrootmnt = mntget(old->altrootmnt);
-			fs->altroot = dget(old->altroot);
+		fs->root = old->root;
+		path_get(&old->root);
+		fs->pwd = old->pwd;
+		path_get(&old->pwd);
+		if (old->altroot.dentry) {
+			fs->altroot = old->altroot;
+			path_get(&old->altroot);
 		} else {
-			fs->altrootmnt = NULL;
-			fs->altroot = NULL;
+			fs->altroot.mnt = NULL;
+			fs->altroot.dentry = NULL;
 		}
 		read_unlock(&old->lock);
 	}
@@ -582,7 +623,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)

 EXPORT_SYMBOL_GPL(copy_fs_struct);

-static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 {
 	if (clone_flags & CLONE_FS) {
 		atomic_inc(&current->fs->count);
@@ -596,7 +637,7 @@ static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)

 static int count_open_files(struct fdtable *fdt)
 {
-	int size = fdt->max_fdset;
+	int size = fdt->max_fds;
 	int i;

 	/* Find the last open fd */
@@ -613,7 +654,7 @@ static struct files_struct *alloc_files(void)
 	struct files_struct *newf;
 	struct fdtable *fdt;

-	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
 	if (!newf)
 		goto out;

@@ -623,12 +664,10 @@ static struct files_struct *alloc_files(void)
 	newf->next_fd = 0;
 	fdt = &newf->fdtab;
 	fdt->max_fds = NR_OPEN_DEFAULT;
-	fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
 	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
 	fdt->open_fds = (fd_set *)&newf->open_fds_init;
 	fdt->fd = &newf->fd_array[0];
 	INIT_RCU_HEAD(&fdt->rcu);
-	fdt->free_files = NULL;
 	fdt->next = NULL;
 	rcu_assign_pointer(newf->fdt, fdt);
 out:
@@ -644,7 +683,7 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 {
 	struct files_struct *newf;
 	struct file **old_fds, **new_fds;
-	int open_files, size, i, expand;
+	int open_files, size, i;
 	struct fdtable *old_fdt, *new_fdt;

 	*errorp = -ENOMEM;
@@ -655,25 +694,14 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 	spin_lock(&oldf->file_lock);
 	old_fdt = files_fdtable(oldf);
 	new_fdt = files_fdtable(newf);
-	size = old_fdt->max_fdset;
 	open_files = count_open_files(old_fdt);
-	expand = 0;

 	/*
-	 * Check whether we need to allocate a larger fd array or fd set.
-	 * Note: we're not a clone task, so the open count won't change.
+	 * Check whether we need to allocate a larger fd array and fd set.
+	 * Note: we're not a clone task, so the open count won't change.
 	 */
-	if (open_files > new_fdt->max_fdset) {
-		new_fdt->max_fdset = 0;
-		expand = 1;
-	}
 	if (open_files > new_fdt->max_fds) {
 		new_fdt->max_fds = 0;
-		expand = 1;
-	}
-
-	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
-	if (expand) {
 		spin_unlock(&oldf->file_lock);
 		spin_lock(&newf->file_lock);
 		*errorp = expand_files(newf, open_files-1);
@@ -693,8 +721,10 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 	old_fds = old_fdt->fd;
 	new_fds = new_fdt->fd;

-	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
-	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);
+	memcpy(new_fdt->open_fds->fds_bits,
+		old_fdt->open_fds->fds_bits, open_files/8);
+	memcpy(new_fdt->close_on_exec->fds_bits,
+		old_fdt->close_on_exec->fds_bits, open_files/8);

 	for (i = open_files; i != 0; i--) {
 		struct file *f = *old_fds++;
@@ -716,25 +746,22 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 	/* compute the remainder to be cleared */
 	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

-        /* This is long word aligned thus could use a optimized version */
-        memset(new_fds, 0, size);
+	/* This is long word aligned thus could use a optimized version */
+	memset(new_fds, 0, size);

-	if (new_fdt->max_fdset > open_files) {
-		int left = (new_fdt->max_fdset-open_files)/8;
+	if (new_fdt->max_fds > open_files) {
+		int left = (new_fdt->max_fds-open_files)/8;
 		int start = open_files / (8 * sizeof(unsigned long));

 		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
 		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
 	}

-out:
 	return newf;

out_release:
-	free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
-	free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
-	free_fd_array(new_fdt->fd, new_fdt->max_fds);
 	kmem_cache_free(files_cachep, newf);
+out:
 	return NULL;
 }

@@ -771,6 +798,31 @@ out:
 	return error;
 }

+static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+#ifdef CONFIG_BLOCK
+	struct io_context *ioc = current->io_context;
+
+	if (!ioc)
+		return 0;
+	/*
+	 * Share io context with parent, if CLONE_IO is set
+	 */
+	if (clone_flags & CLONE_IO) {
+		tsk->io_context = ioc_task_link(ioc);
+		if (unlikely(!tsk->io_context))
+			return -ENOMEM;
+	} else if (ioprio_valid(ioc->ioprio)) {
+		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
+		if (unlikely(!tsk->io_context))
+			return -ENOMEM;
+
+		tsk->io_context->ioprio = ioc->ioprio;
+	}
+#endif
+	return 0;
+}
+
 /*
  * Helper to unshare the files of the current task.
  * We don't want to expose copy_files internals to
@@ -799,7 +851,7 @@ int unshare_files(void)

 EXPORT_SYMBOL(unshare_files);

-static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct sighand_struct *sig;

@@ -822,7 +874,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
 	kmem_cache_free(sighand_cachep, sighand);
 }

-static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 	int ret;
@@ -830,7 +882,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -855,10 +906,9 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);

-	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
+	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sig->it_real_incr.tv64 = 0;
 	sig->real_timer.function = it_real_fn;
-	sig->tsk = tsk;

 	sig->it_virt_expires = cputime_zero;
 	sig->it_virt_incr = cputime_zero;
@@ -866,12 +916,15 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	sig->it_prof_incr = cputime_zero;

 	sig->leader = 0;	/* session leadership doesn't inherit */
-	sig->tty_old_pgrp = 0;
+	sig->tty_old_pgrp = NULL;

 	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
+	sig->gtime = cputime_zero;
+	sig->cgtime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
-	sig->sched_time = 0;
+	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
+	sig->sum_sched_runtime = 0;
 	INIT_LIST_HEAD(&sig->cpu_timers[0]);
 	INIT_LIST_HEAD(&sig->cpu_timers[1]);
 	INIT_LIST_HEAD(&sig->cpu_timers[2]);
@@ -891,6 +944,8 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	}
 	acct_init_pacct(&sig->pacct);

+	tty_audit_fork(sig);
+
 	return 0;
 }

@@ -900,7 +955,7 @@ void __cleanup_signal(struct signal_struct *sig)
 	kmem_cache_free(signal_cachep, sig);
 }

-static inline void cleanup_signal(struct task_struct *tsk)
+static void cleanup_signal(struct task_struct *tsk)
 {
 	struct signal_struct *sig = tsk->signal;

@@ -910,28 +965,29 @@ static inline void cleanup_signal(struct task_struct *tsk)
 		__cleanup_signal(sig);
 }

-static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
+static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;

-	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
+	new_flags &= ~PF_SUPERPRIV;
 	new_flags |= PF_FORKNOEXEC;
 	if (!(clone_flags & CLONE_PTRACE))
 		p->ptrace = 0;
 	p->flags = new_flags;
+	clear_freeze_flag(p);
 }

 asmlinkage long sys_set_tid_address(int __user *tidptr)
 {
 	current->clear_child_tid = tidptr;

-	return current->pid;
+	return task_pid_vnr(current);
 }

-static inline void rt_mutex_init_task(struct task_struct *p)
+static void rt_mutex_init_task(struct task_struct *p)
 {
-#ifdef CONFIG_RT_MUTEXES
 	spin_lock_init(&p->pi_lock);
+#ifdef CONFIG_RT_MUTEXES
 	plist_head_init(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
 #endif
@@ -949,12 +1005,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 					unsigned long stack_start,
 					struct pt_regs *regs,
 					unsigned long stack_size,
-					int __user *parent_tidptr,
 					int __user *child_tidptr,
-					int pid)
+					struct pid *pid)
 {
 	int retval;
-	struct task_struct *p = NULL;
+	struct task_struct *p;
+	int cgroup_callbacks_done = 0;

 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
@@ -993,7 +1049,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (atomic_read(&p->user->processes) >=
 			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
-				p->user != &root_user)
+		    p->user != current->nsproxy->user_ns->root_user)
 			goto bad_fork_free;
 	}

@@ -1018,14 +1074,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->did_exec = 0;
 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
 	copy_flags(clone_flags, p);
-	p->pid = pid;
-	retval = -EFAULT;
-	if (clone_flags & CLONE_PARENT_SETTID)
-		if (put_user(p->pid, parent_tidptr))
-			goto bad_fork_cleanup_delays_binfmt;
-
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
+#ifdef CONFIG_PREEMPT_RCU
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_flipctr_idx = 0;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);

@@ -1034,33 +1088,50 @@ static struct task_struct *copy_process(unsigned long clone_flags,

 	p->utime = cputime_zero;
 	p->stime = cputime_zero;
-	p->sched_time = 0;
+	p->gtime = cputime_zero;
+	p->utimescaled = cputime_zero;
+	p->stimescaled = cputime_zero;
+	p->prev_utime = cputime_zero;
+	p->prev_stime = cputime_zero;
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+	p->last_switch_count = 0;
+	p->last_switch_timestamp = 0;
+#endif
+
+#ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
 	p->wchar = 0;		/* I/O counter: bytes written */
 	p->syscr = 0;		/* I/O counter: read syscalls */
 	p->syscw = 0;		/* I/O counter: write syscalls */
+#endif
+	task_io_accounting_init(p);
 	acct_clear_integrals(p);

- 	p->it_virt_expires = cputime_zero;
+	p->it_virt_expires = cputime_zero;
 	p->it_prof_expires = cputime_zero;
- 	p->it_sched_expires = 0;
- 	INIT_LIST_HEAD(&p->cpu_timers[0]);
- 	INIT_LIST_HEAD(&p->cpu_timers[1]);
- 	INIT_LIST_HEAD(&p->cpu_timers[2]);
+	p->it_sched_expires = 0;
+	INIT_LIST_HEAD(&p->cpu_timers[0]);
+	INIT_LIST_HEAD(&p->cpu_timers[1]);
+	INIT_LIST_HEAD(&p->cpu_timers[2]);

 	p->lock_depth = -1;		/* -1 = no lock */
 	do_posix_clock_monotonic_gettime(&p->start_time);
+	p->real_start_time = p->start_time;
+	monotonic_to_bootbased(&p->real_start_time);
+#ifdef CONFIG_SECURITY
 	p->security = NULL;
+#endif
+	p->cap_bset = current->cap_bset;
 	p->io_context = NULL;
-	p->io_wait = NULL;
 	p->audit_context = NULL;
-	cpuset_fork(p);
+	cgroup_fork(p);
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
-		goto bad_fork_cleanup_cpuset;
+		goto bad_fork_cleanup_cgroup;
 	}
 	mpol_fix_fork_child_flag(p);
 #endif
@@ -1093,9 +1164,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->blocked_on = NULL; /* not blocked yet */
 #endif

-	p->tgid = p->pid;
-	if (clone_flags & CLONE_THREAD)
-		p->tgid = current->tgid;
+	/* Perform scheduler related setup. Assign this task to a CPU. */
+	sched_fork(p, clone_flags);

 	if ((retval = security_task_alloc(p)))
 		goto bad_fork_cleanup_policy;
@@ -1118,22 +1188,43 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_mm;
 	if ((retval = copy_namespaces(clone_flags, p)))
 		goto bad_fork_cleanup_keys;
+	if ((retval = copy_io(clone_flags, p)))
+		goto bad_fork_cleanup_namespaces;
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
-		goto bad_fork_cleanup_namespaces;
+		goto bad_fork_cleanup_io;
+
+	if (pid != &init_struct_pid) {
+		retval = -ENOMEM;
+		pid = alloc_pid(task_active_pid_ns(p));
+		if (!pid)
+			goto bad_fork_cleanup_io;
+
+		if (clone_flags & CLONE_NEWPID) {
+			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
+			if (retval < 0)
+				goto bad_fork_free_pid;
+		}
+	}
+
+	p->pid = pid_nr(pid);
+	p->tgid = p->pid;
+	if (clone_flags & CLONE_THREAD)
+		p->tgid = current->tgid;

 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
 	/*
 	 * Clear TID on mm_release()?
 	 */
 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+#ifdef CONFIG_FUTEX
 	p->robust_list = NULL;
 #ifdef CONFIG_COMPAT
 	p->compat_robust_list = NULL;
 #endif
 	INIT_LIST_HEAD(&p->pi_state_list);
 	p->pi_state_cache = NULL;
-
+#endif
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
 	 */
@@ -1148,6 +1239,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
 #endif
+	clear_all_latency_tracing(p);

 	/* Our parent execution domain becomes current domain
 	   These must match for thread signalling to apply */
@@ -1167,15 +1259,15 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	INIT_LIST_HEAD(&p->ptrace_children);
 	INIT_LIST_HEAD(&p->ptrace_list);

-	/* Perform scheduler related setup. Assign this task to a CPU. */
-	sched_fork(p, clone_flags);
+	/* Now that the task is set up, run cgroup callbacks if
+	 * necessary. We need to run them before the task is visible
+	 * on the tasklist. */
+	cgroup_fork_callbacks(p);
+	cgroup_callbacks_done = 1;

 	/* Need tasklist lock for parent etc handling! */
 	write_lock_irq(&tasklist_lock);

-	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
-	p->ioprio = current->ioprio;
-
 	/*
 	 * The task hasn't been attached yet, so its cpus_allowed mask will
 	 * not be changed, nor will its assigned CPU.
@@ -1186,6 +1278,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * parent's CPU). This avoids alot of nasty races.
 	 */
 	p->cpus_allowed = current->cpus_allowed;
+	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
 	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
@@ -1207,12 +1300,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * A fatal signal pending means that current will exit, so the new
 	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
- 	recalc_sigpending();
+	recalc_sigpending();
 	if (signal_pending(current)) {
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_cleanup_namespaces;
+		goto bad_fork_free_pid;
 	}

 	if (clone_flags & CLONE_THREAD) {
@@ -1241,16 +1334,19 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			__ptrace_link(p, current->parent);

 		if (thread_group_leader(p)) {
-			p->signal->tty = current->signal->tty;
-			p->signal->pgrp = process_group(current);
-			p->signal->session = current->signal->session;
-			attach_pid(p, PIDTYPE_PGID, process_group(p));
-			attach_pid(p, PIDTYPE_SID, p->signal->session);
+			if (clone_flags & CLONE_NEWPID)
+				p->nsproxy->pid_ns->child_reaper = p;
+			p->signal->leader_pid = pid;
+			p->signal->tty = current->signal->tty;
+			set_task_pgrp(p, task_pgrp_nr(current));
+			set_task_session(p, task_session_nr(current));
+			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+			attach_pid(p, PIDTYPE_SID, task_session(current));

 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
 		}
-		attach_pid(p, PIDTYPE_PID, p->pid);
+		attach_pid(p, PIDTYPE_PID, pid);
 		nr_threads++;
 	}

@@ -1258,8 +1354,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
+	cgroup_post_fork(p);
 	return p;

+bad_fork_free_pid:
+	if (pid != &init_struct_pid)
+		free_pid(pid);
+bad_fork_cleanup_io:
+	put_io_context(p->io_context);
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
 bad_fork_cleanup_keys:
@@ -1284,10 +1386,9 @@ bad_fork_cleanup_security:
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
 	mpol_free(p->mempolicy);
-bad_fork_cleanup_cpuset:
+bad_fork_cleanup_cgroup:
 #endif
-	cpuset_exit(p);
-bad_fork_cleanup_delays_binfmt:
+	cgroup_exit(p, cgroup_callbacks_done);
 	delayacct_tsk_free(p);
 	if (p->binfmt)
 		module_put(p->binfmt->module);
@@ -1303,25 +1404,26 @@ fork_out:
 	return ERR_PTR(retval);
 }

-noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	return regs;
 }

-struct task_struct * __devinit fork_idle(int cpu)
+struct task_struct * __cpuinit fork_idle(int cpu)
 {
 	struct task_struct *task;
 	struct pt_regs regs;

-	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
+	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
+				&init_struct_pid);
 	if (!IS_ERR(task))
 		init_idle(task, cpu);
 	return task;
 }

-static inline int fork_traceflag (unsigned clone_flags)
+static int fork_traceflag(unsigned clone_flags)
 {
 	if (clone_flags & CLONE_UNTRACED)
 		return 0;
@@ -1352,19 +1454,33 @@ long do_fork(unsigned long clone_flags,
 {
 	struct task_struct *p;
 	int trace = 0;
-	struct pid *pid = alloc_pid();
 	long nr;

-	if (!pid)
-		return -EAGAIN;
-	nr = pid->nr;
+	/*
+	 * We hope to recycle these flags after 2.6.26
+	 */
+	if (unlikely(clone_flags & CLONE_STOPPED)) {
+		static int __read_mostly count = 100;
+
+		if (count > 0 && printk_ratelimit()) {
+			char comm[TASK_COMM_LEN];
+
+			count--;
+			printk(KERN_INFO "fork(): process `%s' used deprecated "
+					"clone flags 0x%lx\n",
+				get_task_comm(comm, current),
+				clone_flags & CLONE_STOPPED);
+		}
+	}
+
 	if (unlikely(current->ptrace)) {
 		trace = fork_traceflag (clone_flags);
 		if (trace)
 			clone_flags |= CLONE_PTRACE;
 	}

-	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
+	p = copy_process(clone_flags, stack_start, regs, stack_size,
+			child_tidptr, NULL);
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
@@ -1372,6 +1488,11 @@ long do_fork(unsigned long clone_flags,
 	if (!IS_ERR(p)) {
 		struct completion vfork;

+		nr = task_pid_vnr(p);
+
+		if (clone_flags & CLONE_PARENT_SETTID)
+			put_user(nr, parent_tidptr);
+
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
@@ -1388,7 +1509,7 @@ long do_fork(unsigned long clone_flags,
 		if (!(clone_flags & CLONE_STOPPED))
 			wake_up_new_task(p, clone_flags);
 		else
-			p->state = TASK_STOPPED;
+			__set_task_state(p, TASK_STOPPED);

 		if (unlikely (trace)) {
 			current->ptrace_message = nr;
@@ -1396,14 +1517,15 @@ long do_fork(unsigned long clone_flags,
 		}

 		if (clone_flags & CLONE_VFORK) {
+			freezer_do_not_count();
 			wait_for_completion(&vfork);
+			freezer_count();
 			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
 				current->ptrace_message = nr;
 				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
 			}
 		}
 	} else {
-		free_pid(pid);
 		nr = PTR_ERR(p);
 	}
 	return nr;
@@ -1413,13 +1535,12 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif

-static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void sighand_ctor(struct kmem_cache *cachep, void *data)
 {
 	struct sighand_struct *sighand = data;
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-					SLAB_CTOR_CONSTRUCTOR)
-		spin_lock_init(&sighand->siglock);
+	spin_lock_init(&sighand->siglock);
+	init_waitqueue_head(&sighand->signalfd_wqh);
 }

 void __init proc_caches_init(void)
 {
@@ -1427,30 +1548,29 @@ void __init proc_caches_init(void)
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-			sighand_ctor, NULL);
+			sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	files_cachep = kmem_cache_create("files_cache",
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	fs_cachep = kmem_cache_create("fs_cache",
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 	vm_area_cachep = kmem_cache_create("vm_area_struct",
 			sizeof(struct vm_area_struct), 0,
-			SLAB_PANIC, NULL, NULL);
+			SLAB_PANIC, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 }
-

 /*
  * Check constraints on flags passed to the unshare system call and
  * force unsharing of additional process context as appropriate.
  */
-static inline void check_unshare_flags(unsigned long *flags_ptr)
+static void check_unshare_flags(unsigned long *flags_ptr)
 {
 	/*
 	 * If unsharing a thread from a thread group, must also
@@ -1509,34 +1629,13 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
 }

 /*
- * Unshare the namespace structure if it is being shared
- */
-static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
-{
-	struct namespace *ns = current->nsproxy->namespace;
-
-	if ((unshare_flags & CLONE_NEWNS) && ns) {
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		*new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
-		if (!*new_nsp)
-			return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/*
- * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
- * supported yet
+ * Unsharing of sighand is not supported yet
  */
 static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
 {
 	struct sighand_struct *sigh = current->sighand;

-	if ((unshare_flags & CLONE_SIGHAND) &&
-			(sigh && atomic_read(&sigh->count) > 1))
+	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
 		return -EINVAL;
 	else
 		return 0;
@@ -1587,16 +1686,6 @@ static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **n
 	return 0;
 }

-#ifndef CONFIG_IPC_NS
-static inline int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns)
-{
-	if (flags & CLONE_NEWIPC)
-		return -EINVAL;
-
-	return 0;
-}
-#endif
-
 /*
  * unshare allows a process to 'unshare' part of the process
  * context which was originally shared using clone.  copy_*
@@ -1609,14 +1698,11 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 {
 	int err = 0;
 	struct fs_struct *fs, *new_fs = NULL;
-	struct namespace *ns, *new_ns = NULL;
-	struct sighand_struct *sigh, *new_sigh = NULL;
+	struct sighand_struct *new_sigh = NULL;
 	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
 	struct files_struct *fd, *new_fd = NULL;
 	struct sem_undo_list *new_ulist = NULL;
-	struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
-	struct uts_namespace *uts, *new_uts = NULL;
-	struct ipc_namespace *ipc, *new_ipc = NULL;
+	struct nsproxy *new_nsproxy = NULL;

 	check_unshare_flags(&unshare_flags);

@@ -1624,65 +1710,41 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 	err = -EINVAL;
 	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
 				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
-				CLONE_NEWUTS|CLONE_NEWIPC))
+				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
+				CLONE_NEWNET))
 		goto bad_unshare_out;

 	if ((err = unshare_thread(unshare_flags)))
 		goto bad_unshare_out;
 	if ((err = unshare_fs(unshare_flags, &new_fs)))
 		goto bad_unshare_cleanup_thread;
-	if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
-		goto bad_unshare_cleanup_fs;
 	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
-		goto bad_unshare_cleanup_ns;
+		goto bad_unshare_cleanup_fs;
 	if ((err = unshare_vm(unshare_flags, &new_mm)))
 		goto bad_unshare_cleanup_sigh;
 	if ((err = unshare_fd(unshare_flags, &new_fd)))
 		goto bad_unshare_cleanup_vm;
 	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
 		goto bad_unshare_cleanup_fd;
-	if ((err = unshare_utsname(unshare_flags, &new_uts)))
+	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
+			new_fs)))
 		goto bad_unshare_cleanup_semundo;
-	if ((err = unshare_ipcs(unshare_flags, &new_ipc)))
-		goto bad_unshare_cleanup_uts;
-
-	if (new_ns || new_uts || new_ipc) {
-		old_nsproxy = current->nsproxy;
-		new_nsproxy = dup_namespaces(old_nsproxy);
-		if (!new_nsproxy) {
-			err = -ENOMEM;
-			goto bad_unshare_cleanup_ipc;
-		}
-	}
-
-	if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist ||
-			new_uts || new_ipc) {
-		task_lock(current);
+	if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

 		if (new_nsproxy) {
-			current->nsproxy = new_nsproxy;
-			new_nsproxy = old_nsproxy;
+			switch_task_namespaces(current, new_nsproxy);
+			new_nsproxy = NULL;
 		}

+		task_lock(current);
+
 		if (new_fs) {
 			fs = current->fs;
 			current->fs = new_fs;
 			new_fs = fs;
 		}

-		if (new_ns) {
-			ns = current->nsproxy->namespace;
-			current->nsproxy->namespace = new_ns;
-			new_ns = ns;
-		}
-
-		if (new_sigh) {
-			sigh = current->sighand;
-			rcu_assign_pointer(current->sighand, new_sigh);
-			new_sigh = sigh;
-		}
-
 		if (new_mm) {
 			mm = current->mm;
 			active_mm = current->active_mm;
@@ -1698,32 +1760,12 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 			new_fd = fd;
 		}

-		if (new_uts) {
-			uts = current->nsproxy->uts_ns;
-			current->nsproxy->uts_ns = new_uts;
-			new_uts = uts;
-		}
-
-		if (new_ipc) {
-			ipc = current->nsproxy->ipc_ns;
-			current->nsproxy->ipc_ns = new_ipc;
-			new_ipc = ipc;
-		}
-
 		task_unlock(current);
 	}

 	if (new_nsproxy)
 		put_nsproxy(new_nsproxy);

-bad_unshare_cleanup_ipc:
-	if (new_ipc)
-		put_ipc_ns(new_ipc);
-
-bad_unshare_cleanup_uts:
-	if (new_uts)
-		put_uts_ns(new_uts);
-
 bad_unshare_cleanup_semundo:
 bad_unshare_cleanup_fd:
 	if (new_fd)
@@ -1738,10 +1780,6 @@ bad_unshare_cleanup_sigh:
 	if (atomic_dec_and_test(&new_sigh->count))
 		kmem_cache_free(sighand_cachep, new_sigh);

-bad_unshare_cleanup_ns:
-	if (new_ns)
-		put_namespace(new_ns);
-
 bad_unshare_cleanup_fs:
 	if (new_fs)
 		put_fs_struct(new_fs);