/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <trace/sched.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

DEFINE_TRACE(sched_process_fork);

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif
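
/*
 * Annotation (not part of the original file): THREAD_SIZE is
 * PAGE_SIZE << THREAD_SIZE_ORDER, so alloc_thread_info() above grabs
 * the task's entire kernel stack as one physically contiguous block,
 * with the struct thread_info sitting at its base.  For example, on a
 * configuration with 4 KiB pages and THREAD_SIZE_ORDER == 1:
 *
 *	THREAD_SIZE = 4096 << 1 = 8192 bytes (two pages per task)
 */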

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most one
	 * eighth of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}
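
/*
 * Worked example for the max_threads formula above (annotation, not
 * original text): assuming 4 KiB pages and an 8 KiB THREAD_SIZE, a
 * machine with 1 GiB of RAM passes mempages = 262144, giving
 *
 *	max_threads = 262144 / (8 * 8192 / 4096) = 16384
 *
 * and a default per-user RLIMIT_NPROC of max_threads/2 = 8192.
 */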

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
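
/*
 * Annotation (not original text): dup_task_struct() hands back a
 * byte-for-byte copy of the parent's task_struct plus a fresh kernel
 * stack.  setup_thread_stack() (from <linux/sched.h>) copies the
 * parent's thread_info into the new stack and points it back at the
 * child, roughly:
 *
 *	*task_thread_info(tsk) = *task_thread_info(orig);
 *	task_thread_info(tsk)->task = tsk;
 *
 * Everything else the child needs (mm, files, signal handlers, ...)
 * is fixed up later by the copy_*() helpers in copy_process().
 */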

#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
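
/*
 * Annotation: VM_DONTCOPY is how a mapping opts out of the loop above.
 * Userspace requests it with madvise(), e.g. (illustrative):
 *
 *	madvise(buf, len, MADV_DONTFORK);	- child won't inherit buf
 *
 * Such VMAs are skipped entirely at fork time; every other VMA is
 * duplicated and its page tables copied by copy_page_range(), which
 * marks writable private pages read-only in both mms so they can be
 * copied lazily on write (COW).
 */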

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ? current->mm->flags
				  : MMF_DUMP_FILTER_DEFAULT;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_owner(mm, p);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm, current);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);
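
/*
 * Annotation on the two mm reference counts (not original text):
 *
 *	mm_users counts users of the address space proper (threads,
 *	get_task_mm() callers).  mmput() drops it, and the final drop
 *	tears down the mappings via exit_mmap().
 *
 *	mm_count counts references to the bare struct mm_struct itself
 *	(e.g. lazy-TLB kernel threads).  mmdrop() drops it, and the
 *	final drop frees the structure in __mmdrop().
 *
 * The mm_users total holds exactly one mm_count reference, which is
 * why mmput() finishes with mmdrop() above.
 */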

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks that PF_KTHREAD is not
 * set (a kernel thread may have transiently adopted a user mm via
 * use_mm(), e.g. to do its AIO, and such a borrowed mm must not be
 * handed out); if it is clear, returns a reference to the mm after
 * bumping up the use count.  The caller must release the mm via
 * mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
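
/*
 * Typical usage sketch (illustrative, not part of this file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect the mm, e.g. for a /proc handler ...
 *		mmput(mm);
 *	}
 *
 * Taking task_lock() inside get_task_mm() makes the ->mm load and the
 * mm_users increment atomic with respect to the task exiting.
 */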

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or on success.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one: we mmput the new mm_struct before restoring the
 * old one.
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise.  Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid
	    && !(tsk->flags & PF_SIGNALED)
	    && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
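
/*
 * Annotation: the clear_child_tid handling above is the kernel half of
 * thread joining.  A thread library typically creates threads roughly
 * like this (illustrative NPTL-style sketch; thr->tid is hypothetical):
 *
 *	clone(start_fn, stack, CLONE_VM | CLONE_THREAD | CLONE_SIGHAND |
 *	      ... | CLONE_CHILD_CLEARTID, arg, NULL, NULL, &thr->tid);
 *
 * When the thread exits, the kernel stores 0 to thr->tid and issues a
 * FUTEX_WAKE on that address, so a joiner can simply futex-wait on it.
 */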

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
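
/*
 * Annotation: the three outcomes of copy_mm() (not original text):
 *
 *	current->mm == NULL - forking from a kernel thread; the child
 *		gets no mm and will run on a borrowed ("lazy TLB")
 *		active_mm.
 *	CLONE_VM set        - a thread; the child shares the parent's
 *		mm and only mm_users is bumped.
 *	otherwise           - classic fork(); dup_mm() builds a full
 *		copy-on-write duplicate of the address space.
 */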

static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->root = old->root;
		path_get(&old->root);
		fs->pwd = old->pwd;
		path_get(&old->pwd);
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}
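
/*
 * Annotation (not original text): CLONE_IO gives parent and child one
 * shared io_context, so an I/O scheduler such as CFQ treats their
 * requests as a single stream - useful for cooperating AIO threads.
 * Without it, a child whose parent carries a non-default I/O priority
 * gets a fresh context that merely inherits the ioprio value.
 */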

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}


/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	/* Thread group counters. */
	thread_group_cputime_init(sig);

	/* Expiration times and increments. */
	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	/* Cached expiration times. */
	sig->cputime_expires.prof_exp = cputime_zero;
	sig->cputime_expires.virt_exp = cputime_zero;
	sig->cputime_expires.sched_exp = 0;

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		ret = thread_group_cputime_clone_thread(current);
		if (likely(!ret)) {
			atomic_inc(&current->signal->count);
			atomic_inc(&current->signal->live);
		}
		return ret;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;
	sig->tty = NULL;

	sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	task_io_accounting_init(&sig->ioac);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	thread_group_cputime_free(sig);
	exit_thread_group_keys(sig);
	tty_kref_put(sig->tty);
	kmem_cache_free(signal_cachep, sig);
}

static void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}
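
/*
 * Usage sketch (illustrative; the exact variable is library-specific):
 * C libraries invoke this once per thread at startup, e.g.
 *
 *	syscall(SYS_set_tid_address, &self->tid);
 *
 * which arms the same clear_child_tid/FUTEX_WAKE mechanism that
 * CLONE_CHILD_CLEARTID arms for created threads (see mm_release()).
 */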

static void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = cputime_zero;
	tsk->cputime_expires.virt_exp = cputime_zero;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

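	/*
	 * Annotation: a conforming pthread_create() satisfies all three
	 * checks above.  NPTL, for instance, passes roughly (illustrative):
	 *
	 *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
	 *	CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
	 *	CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
	 *
	 * CLONE_THREAD requires CLONE_SIGHAND, which in turn requires
	 * CLONE_VM, so the checks form a chain rather than three
	 * independent rules.
	 */
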
	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->user != current->nsproxy->user_ns->root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

#ifdef CONFIG_DETECT_SOFTLOCKUP
	p->last_switch_count = 0;
	p->last_switch_timestamp = 0;
#endif

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
	p->security = NULL;
#endif
	p->cap_bset = current->cap_bset;
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(task_active_pid_ns(p));
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* Our parent execution domain becomes current domain.
	   These must match for thread signalling to apply. */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			set_task_pgrp(p, task_pgrp_nr(current));
			set_task_session(p, task_session_nr(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might become invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		tracehook_report_clone(trace, regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
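
/*
 * Annotation: the classic front-ends funnel into do_fork() roughly as
 * follows (the exact wiring is per-architecture; illustrative):
 *
 *	fork()  -> do_fork(SIGCHLD, ...)
 *	vfork() -> do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, ...)
 *	clone() -> do_fork(clone_flags, ...) as supplied by userspace
 *
 * The CLONE_VFORK completion handled above is what makes vfork()
 * block the parent until the child execs or exits.
 */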

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}
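
/*
 * Example (annotation): unshare(CLONE_NEWNS) enters here as plain
 * CLONE_NEWNS and leaves as CLONE_NEWNS | CLONE_FS, since a private
 * mount namespace is of little use while the fs_struct (root, cwd)
 * is still shared with other tasks.
 */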

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
				CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
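
/*
 * Annotation: note that the success path of sys_unshare() deliberately
 * falls through the bad_unshare_cleanup_* labels.  After the
 * task_lock()ed swap, new_fs/new_mm/new_fd point at the *old*
 * structures, so the same code that frees unused copies on failure
 * drops the displaced originals on success (with err still 0).
 */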

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */

int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}