1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/fork.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7
8 /*
9  *  'fork.c' contains the help-routines for the 'fork' system call
10  * (see also entry.S and others).
11  * Fork is rather simple, once you get the hang of it, but the memory
12  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
13  */
14
15 #include <linux/anon_inodes.h>
16 #include <linux/slab.h>
17 #include <linux/sched/autogroup.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/coredump.h>
20 #include <linux/sched/user.h>
21 #include <linux/sched/numa_balancing.h>
22 #include <linux/sched/stat.h>
23 #include <linux/sched/task.h>
24 #include <linux/sched/task_stack.h>
25 #include <linux/sched/cputime.h>
26 #include <linux/seq_file.h>
27 #include <linux/rtmutex.h>
28 #include <linux/init.h>
29 #include <linux/unistd.h>
30 #include <linux/module.h>
31 #include <linux/vmalloc.h>
32 #include <linux/completion.h>
33 #include <linux/personality.h>
34 #include <linux/mempolicy.h>
35 #include <linux/sem.h>
36 #include <linux/file.h>
37 #include <linux/fdtable.h>
38 #include <linux/iocontext.h>
39 #include <linux/key.h>
40 #include <linux/binfmts.h>
41 #include <linux/mman.h>
42 #include <linux/mmu_notifier.h>
43 #include <linux/hmm.h>
44 #include <linux/fs.h>
45 #include <linux/mm.h>
46 #include <linux/vmacache.h>
47 #include <linux/nsproxy.h>
48 #include <linux/capability.h>
49 #include <linux/cpu.h>
50 #include <linux/cgroup.h>
51 #include <linux/security.h>
52 #include <linux/hugetlb.h>
53 #include <linux/seccomp.h>
54 #include <linux/swap.h>
55 #include <linux/syscalls.h>
56 #include <linux/jiffies.h>
57 #include <linux/futex.h>
58 #include <linux/compat.h>
59 #include <linux/kthread.h>
60 #include <linux/task_io_accounting_ops.h>
61 #include <linux/rcupdate.h>
62 #include <linux/ptrace.h>
63 #include <linux/mount.h>
64 #include <linux/audit.h>
65 #include <linux/memcontrol.h>
66 #include <linux/ftrace.h>
67 #include <linux/proc_fs.h>
68 #include <linux/profile.h>
69 #include <linux/rmap.h>
70 #include <linux/ksm.h>
71 #include <linux/acct.h>
72 #include <linux/userfaultfd_k.h>
73 #include <linux/tsacct_kern.h>
74 #include <linux/cn_proc.h>
75 #include <linux/freezer.h>
76 #include <linux/delayacct.h>
77 #include <linux/taskstats_kern.h>
78 #include <linux/random.h>
79 #include <linux/tty.h>
80 #include <linux/blkdev.h>
81 #include <linux/fs_struct.h>
82 #include <linux/magic.h>
83 #include <linux/perf_event.h>
84 #include <linux/posix-timers.h>
85 #include <linux/user-return-notifier.h>
86 #include <linux/oom.h>
87 #include <linux/khugepaged.h>
88 #include <linux/signalfd.h>
89 #include <linux/uprobes.h>
90 #include <linux/aio.h>
91 #include <linux/compiler.h>
92 #include <linux/sysctl.h>
93 #include <linux/kcov.h>
94 #include <linux/livepatch.h>
95 #include <linux/thread_info.h>
96 #include <linux/stackleak.h>
97
98 #include <asm/pgtable.h>
99 #include <asm/pgalloc.h>
100 #include <linux/uaccess.h>
101 #include <asm/mmu_context.h>
102 #include <asm/cacheflush.h>
103 #include <asm/tlbflush.h>
104
105 #include <trace/events/sched.h>
106
107 #define CREATE_TRACE_POINTS
108 #include <trace/events/task.h>
109
110 /*
111  * Minimum number of threads to boot the kernel
112  */
113 #define MIN_THREADS 20
114
115 /*
116  * Maximum number of threads
117  */
118 #define MAX_THREADS FUTEX_TID_MASK
119
120 /*
121  * Counters protected by write_lock_irq(&tasklist_lock)
122  */
123 unsigned long total_forks;      /* Handle normal Linux uptimes. */
124 int nr_threads;                 /* The idle threads do not count.. */
125
126 static int max_threads;         /* tunable limit on nr_threads */
127
128 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
129
130 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
131
132 #ifdef CONFIG_PROVE_RCU
133 int lockdep_tasklist_lock_is_held(void)
134 {
135         return lockdep_is_held(&tasklist_lock);
136 }
137 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
138 #endif /* #ifdef CONFIG_PROVE_RCU */
139
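/*
 * Sum the per-CPU counts of processes (thread group leaders).  The total
 * is read without locking, so it may be slightly stale.
 */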
140 int nr_processes(void)
141 {
142         int cpu;
143         int total = 0;
144
145         for_each_possible_cpu(cpu)
146                 total += per_cpu(process_counts, cpu);
147
148         return total;
149 }
150
151 void __weak arch_release_task_struct(struct task_struct *tsk)
152 {
153 }
154
155 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
156 static struct kmem_cache *task_struct_cachep;
157
158 static inline struct task_struct *alloc_task_struct_node(int node)
159 {
160         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
161 }
162
163 static inline void free_task_struct(struct task_struct *tsk)
164 {
165         kmem_cache_free(task_struct_cachep, tsk);
166 }
167 #endif
168
169 #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
170
171 /*
172  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
173  * kmemcache based allocator.
174  */
175 # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
176
177 #ifdef CONFIG_VMAP_STACK
178 /*
179  * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
180  * flush.  Try to minimize the number of calls by caching stacks.
181  */
182 #define NR_CACHED_STACKS 2
183 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
184
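/*
 * CPU hotplug teardown callback, registered in fork_init(): free any
 * vmalloc'ed stacks still cached for the CPU that is going away.
 */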
185 static int free_vm_stack_cache(unsigned int cpu)
186 {
187         struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
188         int i;
189
190         for (i = 0; i < NR_CACHED_STACKS; i++) {
191                 struct vm_struct *vm_stack = cached_vm_stacks[i];
192
193                 if (!vm_stack)
194                         continue;
195
196                 vfree(vm_stack->addr);
197                 cached_vm_stacks[i] = NULL;
198         }
199
200         return 0;
201 }
202 #endif
203
204 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
205 {
206 #ifdef CONFIG_VMAP_STACK
207         void *stack;
208         int i;
209
210         for (i = 0; i < NR_CACHED_STACKS; i++) {
211                 struct vm_struct *s;
212
213                 s = this_cpu_xchg(cached_stacks[i], NULL);
214
215                 if (!s)
216                         continue;
217
218                 /* Clear stale pointers from reused stack. */
219                 memset(s->addr, 0, THREAD_SIZE);
220
221                 tsk->stack_vm_area = s;
222                 tsk->stack = s->addr;
223                 return s->addr;
224         }
225
226         /*
227          * Allocated stacks are cached and later reused by new threads,
228          * so memcg accounting is performed manually on assigning/releasing
229          * stacks to tasks. Drop __GFP_ACCOUNT.
230          */
231         stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
232                                      VMALLOC_START, VMALLOC_END,
233                                      THREADINFO_GFP & ~__GFP_ACCOUNT,
234                                      PAGE_KERNEL,
235                                      0, node, __builtin_return_address(0));
236
237         /*
238          * We can't call find_vm_area() in interrupt context, and
239          * free_thread_stack() can be called in interrupt context,
240          * so cache the vm_struct.
241          */
242         if (stack) {
243                 tsk->stack_vm_area = find_vm_area(stack);
244                 tsk->stack = stack;
245         }
246         return stack;
247 #else
248         struct page *page = alloc_pages_node(node, THREADINFO_GFP,
249                                              THREAD_SIZE_ORDER);
250
251         return page ? page_address(page) : NULL;
252 #endif
253 }
254
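/*
 * Release a task's kernel stack.  With CONFIG_VMAP_STACK the stack is
 * uncharged from its memcg and, if a per-CPU cache slot is free, parked
 * there for reuse; otherwise it is freed outright.
 */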
255 static inline void free_thread_stack(struct task_struct *tsk)
256 {
257 #ifdef CONFIG_VMAP_STACK
258         struct vm_struct *vm = task_stack_vm_area(tsk);
259
260         if (vm) {
261                 int i;
262
263                 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
264                         mod_memcg_page_state(vm->pages[i],
265                                              MEMCG_KERNEL_STACK_KB,
266                                              -(int)(PAGE_SIZE / 1024));
267
268                         memcg_kmem_uncharge(vm->pages[i], 0);
269                 }
270
271                 for (i = 0; i < NR_CACHED_STACKS; i++) {
272                         if (this_cpu_cmpxchg(cached_stacks[i],
273                                         NULL, tsk->stack_vm_area) != NULL)
274                                 continue;
275
276                         return;
277                 }
278
279                 vfree_atomic(tsk->stack);
280                 return;
281         }
282 #endif
283
284         __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
285 }
286 # else
287 static struct kmem_cache *thread_stack_cache;
288
289 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
290                                                   int node)
291 {
292         unsigned long *stack;
293         stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
294         tsk->stack = stack;
295         return stack;
296 }
297
298 static void free_thread_stack(struct task_struct *tsk)
299 {
300         kmem_cache_free(thread_stack_cache, tsk->stack);
301 }
302
303 void thread_stack_cache_init(void)
304 {
305         thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
306                                         THREAD_SIZE, THREAD_SIZE, 0, 0,
307                                         THREAD_SIZE, NULL);
308         BUG_ON(thread_stack_cache == NULL);
309 }
310 # endif
311 #endif
312
313 /* SLAB cache for signal_struct structures (tsk->signal) */
314 static struct kmem_cache *signal_cachep;
315
316 /* SLAB cache for sighand_struct structures (tsk->sighand) */
317 struct kmem_cache *sighand_cachep;
318
319 /* SLAB cache for files_struct structures (tsk->files) */
320 struct kmem_cache *files_cachep;
321
322 /* SLAB cache for fs_struct structures (tsk->fs) */
323 struct kmem_cache *fs_cachep;
324
325 /* SLAB cache for vm_area_struct structures */
326 static struct kmem_cache *vm_area_cachep;
327
328 /* SLAB cache for mm_struct structures (tsk->mm) */
329 static struct kmem_cache *mm_cachep;
330
331 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
332 {
333         struct vm_area_struct *vma;
334
335         vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
336         if (vma)
337                 vma_init(vma, mm);
338         return vma;
339 }
340
341 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
342 {
343         struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
344
345         if (new) {
346                 *new = *orig;
347                 INIT_LIST_HEAD(&new->anon_vma_chain);
348         }
349         return new;
350 }
351
352 void vm_area_free(struct vm_area_struct *vma)
353 {
354         kmem_cache_free(vm_area_cachep, vma);
355 }
356
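/*
 * Charge (account == 1) or uncharge (account == -1) the task's kernel
 * stack against the NR_KERNEL_STACK_KB zone counter and, for non-vmapped
 * stacks, against the owning memcg as well.
 */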
357 static void account_kernel_stack(struct task_struct *tsk, int account)
358 {
359         void *stack = task_stack_page(tsk);
360         struct vm_struct *vm = task_stack_vm_area(tsk);
361
362         BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
363
364         if (vm) {
365                 int i;
366
367                 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
368
369                 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
370                         mod_zone_page_state(page_zone(vm->pages[i]),
371                                             NR_KERNEL_STACK_KB,
372                                             PAGE_SIZE / 1024 * account);
373                 }
374         } else {
375                 /*
376                  * All stack pages are in the same zone and belong to the
377                  * same memcg.
378                  */
379                 struct page *first_page = virt_to_page(stack);
380
381                 mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
382                                     THREAD_SIZE / 1024 * account);
383
384                 mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
385                                      account * (THREAD_SIZE / 1024));
386         }
387 }
388
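/*
 * Charge each page of a vmap'ed kernel stack to the memcg; without
 * CONFIG_VMAP_STACK the stack pages are already charged at allocation
 * time via THREADINFO_GFP.
 */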
389 static int memcg_charge_kernel_stack(struct task_struct *tsk)
390 {
391 #ifdef CONFIG_VMAP_STACK
392         struct vm_struct *vm = task_stack_vm_area(tsk);
393         int ret;
394
395         if (vm) {
396                 int i;
397
398                 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
399                         /*
400                          * If memcg_kmem_charge() fails, page->mem_cgroup
401                          * pointer is NULL, and both memcg_kmem_uncharge()
402                          * and mod_memcg_page_state() in free_thread_stack()
403                          * will ignore this page. So it's safe.
404                          */
405                         ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
406                         if (ret)
407                                 return ret;
408
409                         mod_memcg_page_state(vm->pages[i],
410                                              MEMCG_KERNEL_STACK_KB,
411                                              PAGE_SIZE / 1024);
412                 }
413         }
414 #endif
415         return 0;
416 }
417
418 static void release_task_stack(struct task_struct *tsk)
419 {
420         if (WARN_ON(tsk->state != TASK_DEAD))
421                 return;  /* Better to leak the stack than to free prematurely */
422
423         account_kernel_stack(tsk, -1);
424         free_thread_stack(tsk);
425         tsk->stack = NULL;
426 #ifdef CONFIG_VMAP_STACK
427         tsk->stack_vm_area = NULL;
428 #endif
429 }
430
431 #ifdef CONFIG_THREAD_INFO_IN_TASK
432 void put_task_stack(struct task_struct *tsk)
433 {
434         if (refcount_dec_and_test(&tsk->stack_refcount))
435                 release_task_stack(tsk);
436 }
437 #endif
438
439 void free_task(struct task_struct *tsk)
440 {
441 #ifndef CONFIG_THREAD_INFO_IN_TASK
442         /*
443          * The task is finally done with both the stack and thread_info,
444          * so free both.
445          */
446         release_task_stack(tsk);
447 #else
448         /*
449          * If the task had a separate stack allocation, it should be gone
450          * by now.
451          */
452         WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
453 #endif
454         rt_mutex_debug_task_free(tsk);
455         ftrace_graph_exit_task(tsk);
456         put_seccomp_filter(tsk);
457         arch_release_task_struct(tsk);
458         if (tsk->flags & PF_KTHREAD)
459                 free_kthread_struct(tsk);
460         free_task_struct(tsk);
461 }
462 EXPORT_SYMBOL(free_task);
463
464 #ifdef CONFIG_MMU
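/*
 * Duplicate the parent's (oldmm's) VMAs and page tables into the child's
 * mm for fork().  VM_DONTCOPY areas are skipped and VM_WIPEONFORK areas
 * get a clean slate instead of copied page tables.
 */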
465 static __latent_entropy int dup_mmap(struct mm_struct *mm,
466                                         struct mm_struct *oldmm)
467 {
468         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
469         struct rb_node **rb_link, *rb_parent;
470         int retval;
471         unsigned long charge;
472         LIST_HEAD(uf);
473
474         uprobe_start_dup_mmap();
475         if (down_write_killable(&oldmm->mmap_sem)) {
476                 retval = -EINTR;
477                 goto fail_uprobe_end;
478         }
479         flush_cache_dup_mm(oldmm);
480         uprobe_dup_mmap(oldmm, mm);
481         /*
482          * Not linked in yet - no deadlock potential:
483          */
484         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
485
486         /* No ordering required: file already has been exposed. */
487         RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
488
489         mm->total_vm = oldmm->total_vm;
490         mm->data_vm = oldmm->data_vm;
491         mm->exec_vm = oldmm->exec_vm;
492         mm->stack_vm = oldmm->stack_vm;
493
494         rb_link = &mm->mm_rb.rb_node;
495         rb_parent = NULL;
496         pprev = &mm->mmap;
497         retval = ksm_fork(mm, oldmm);
498         if (retval)
499                 goto out;
500         retval = khugepaged_fork(mm, oldmm);
501         if (retval)
502                 goto out;
503
504         prev = NULL;
505         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
506                 struct file *file;
507
508                 if (mpnt->vm_flags & VM_DONTCOPY) {
509                         vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
510                         continue;
511                 }
512                 charge = 0;
513                 /*
514                  * Don't duplicate many vmas if we've been oom-killed (for
515                  * example)
516                  */
517                 if (fatal_signal_pending(current)) {
518                         retval = -EINTR;
519                         goto out;
520                 }
521                 if (mpnt->vm_flags & VM_ACCOUNT) {
522                         unsigned long len = vma_pages(mpnt);
523
524                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
525                                 goto fail_nomem;
526                         charge = len;
527                 }
528                 tmp = vm_area_dup(mpnt);
529                 if (!tmp)
530                         goto fail_nomem;
531                 retval = vma_dup_policy(mpnt, tmp);
532                 if (retval)
533                         goto fail_nomem_policy;
534                 tmp->vm_mm = mm;
535                 retval = dup_userfaultfd(tmp, &uf);
536                 if (retval)
537                         goto fail_nomem_anon_vma_fork;
538                 if (tmp->vm_flags & VM_WIPEONFORK) {
539                         /* VM_WIPEONFORK gets a clean slate in the child. */
540                         tmp->anon_vma = NULL;
541                         if (anon_vma_prepare(tmp))
542                                 goto fail_nomem_anon_vma_fork;
543                 } else if (anon_vma_fork(tmp, mpnt))
544                         goto fail_nomem_anon_vma_fork;
545                 tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
546                 tmp->vm_next = tmp->vm_prev = NULL;
547                 file = tmp->vm_file;
548                 if (file) {
549                         struct inode *inode = file_inode(file);
550                         struct address_space *mapping = file->f_mapping;
551
552                         get_file(file);
553                         if (tmp->vm_flags & VM_DENYWRITE)
554                                 atomic_dec(&inode->i_writecount);
555                         i_mmap_lock_write(mapping);
556                         if (tmp->vm_flags & VM_SHARED)
557                                 atomic_inc(&mapping->i_mmap_writable);
558                         flush_dcache_mmap_lock(mapping);
559                         /* insert tmp into the share list, just after mpnt */
560                         vma_interval_tree_insert_after(tmp, mpnt,
561                                         &mapping->i_mmap);
562                         flush_dcache_mmap_unlock(mapping);
563                         i_mmap_unlock_write(mapping);
564                 }
565
566                 /*
567                  * Clear hugetlb-related page reserves for children. This only
568                  * affects MAP_PRIVATE mappings. Faults generated by the child
569                  * are not guaranteed to succeed, even if read-only
570                  */
571                 if (is_vm_hugetlb_page(tmp))
572                         reset_vma_resv_huge_pages(tmp);
573
574                 /*
575                  * Link in the new vma and copy the page table entries.
576                  */
577                 *pprev = tmp;
578                 pprev = &tmp->vm_next;
579                 tmp->vm_prev = prev;
580                 prev = tmp;
581
582                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
583                 rb_link = &tmp->vm_rb.rb_right;
584                 rb_parent = &tmp->vm_rb;
585
586                 mm->map_count++;
587                 if (!(tmp->vm_flags & VM_WIPEONFORK))
588                         retval = copy_page_range(mm, oldmm, mpnt);
589
590                 if (tmp->vm_ops && tmp->vm_ops->open)
591                         tmp->vm_ops->open(tmp);
592
593                 if (retval)
594                         goto out;
595         }
596         /* a new mm has just been created */
597         retval = arch_dup_mmap(oldmm, mm);
598 out:
599         up_write(&mm->mmap_sem);
600         flush_tlb_mm(oldmm);
601         up_write(&oldmm->mmap_sem);
602         dup_userfaultfd_complete(&uf);
603 fail_uprobe_end:
604         uprobe_end_dup_mmap();
605         return retval;
606 fail_nomem_anon_vma_fork:
607         mpol_put(vma_policy(tmp));
608 fail_nomem_policy:
609         vm_area_free(tmp);
610 fail_nomem:
611         retval = -ENOMEM;
612         vm_unacct_memory(charge);
613         goto out;
614 }
615
616 static inline int mm_alloc_pgd(struct mm_struct *mm)
617 {
618         mm->pgd = pgd_alloc(mm);
619         if (unlikely(!mm->pgd))
620                 return -ENOMEM;
621         return 0;
622 }
623
624 static inline void mm_free_pgd(struct mm_struct *mm)
625 {
626         pgd_free(mm, mm->pgd);
627 }
628 #else
629 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
630 {
631         down_write(&oldmm->mmap_sem);
632         RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
633         up_write(&oldmm->mmap_sem);
634         return 0;
635 }
636 #define mm_alloc_pgd(mm)        (0)
637 #define mm_free_pgd(mm)
638 #endif /* CONFIG_MMU */
639
640 static void check_mm(struct mm_struct *mm)
641 {
642         int i;
643
644         for (i = 0; i < NR_MM_COUNTERS; i++) {
645                 long x = atomic_long_read(&mm->rss_stat.count[i]);
646
647                 if (unlikely(x))
648                         printk(KERN_ALERT "BUG: Bad rss-counter state "
649                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
650         }
651
652         if (mm_pgtables_bytes(mm))
653                 pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
654                                 mm_pgtables_bytes(mm));
655
656 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
657         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
658 #endif
659 }
660
661 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
662 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
663
664 /*
665  * Called when the last reference to the mm
666  * is dropped: either by a lazy thread or by
667  * mmput. Free the page directory and the mm.
668  */
669 void __mmdrop(struct mm_struct *mm)
670 {
671         BUG_ON(mm == &init_mm);
672         WARN_ON_ONCE(mm == current->mm);
673         WARN_ON_ONCE(mm == current->active_mm);
674         mm_free_pgd(mm);
675         destroy_context(mm);
676         hmm_mm_destroy(mm);
677         mmu_notifier_mm_destroy(mm);
678         check_mm(mm);
679         put_user_ns(mm->user_ns);
680         free_mm(mm);
681 }
682 EXPORT_SYMBOL_GPL(__mmdrop);
683
684 static void mmdrop_async_fn(struct work_struct *work)
685 {
686         struct mm_struct *mm;
687
688         mm = container_of(work, struct mm_struct, async_put_work);
689         __mmdrop(mm);
690 }
691
692 static void mmdrop_async(struct mm_struct *mm)
693 {
694         if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
695                 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
696                 schedule_work(&mm->async_put_work);
697         }
698 }
699
700 static inline void free_signal_struct(struct signal_struct *sig)
701 {
702         taskstats_tgid_free(sig);
703         sched_autogroup_exit(sig);
704         /*
705          * __mmdrop is not safe to call from softirq context on x86 due to
706          * pgd_dtor so postpone it to the async context
707          */
708         if (sig->oom_mm)
709                 mmdrop_async(sig->oom_mm);
710         kmem_cache_free(signal_cachep, sig);
711 }
712
713 static inline void put_signal_struct(struct signal_struct *sig)
714 {
715         if (refcount_dec_and_test(&sig->sigcnt))
716                 free_signal_struct(sig);
717 }
718
719 void __put_task_struct(struct task_struct *tsk)
720 {
721         WARN_ON(!tsk->exit_state);
722         WARN_ON(refcount_read(&tsk->usage));
723         WARN_ON(tsk == current);
724
725         cgroup_free(tsk);
726         task_numa_free(tsk);
727         security_task_free(tsk);
728         exit_creds(tsk);
729         delayacct_tsk_free(tsk);
730         put_signal_struct(tsk->signal);
731
732         if (!profile_handoff_task(tsk))
733                 free_task(tsk);
734 }
735 EXPORT_SYMBOL_GPL(__put_task_struct);
736
737 void __init __weak arch_task_cache_init(void) { }
738
739 /*
740  * set_max_threads - limit nr_threads so that thread stacks can consume at most ~1/8th of available memory
741  */
742 static void set_max_threads(unsigned int max_threads_suggested)
743 {
744         u64 threads;
745         unsigned long nr_pages = totalram_pages();
746
747         /*
748          * The number of threads shall be limited such that the thread
749          * structures may only consume a small part of the available memory.
750          */
751         if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
752                 threads = MAX_THREADS;
753         else
754                 threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
755                                     (u64) THREAD_SIZE * 8UL);
756
757         if (threads > max_threads_suggested)
758                 threads = max_threads_suggested;
759
760         max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
761 }
762
763 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
764 /* Initialized by the architecture: */
765 int arch_task_struct_size __read_mostly;
766 #endif
767
768 static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
769 {
770         /* Fetch thread_struct whitelist for the architecture. */
771         arch_thread_struct_whitelist(offset, size);
772
773         /*
774          * Handle zero-sized whitelist or empty thread_struct, otherwise
775          * adjust offset to position of thread_struct in task_struct.
776          */
777         if (unlikely(*size == 0))
778                 *offset = 0;
779         else
780                 *offset += offsetof(struct task_struct, thread);
781 }
782
783 void __init fork_init(void)
784 {
785         int i;
786 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
787 #ifndef ARCH_MIN_TASKALIGN
788 #define ARCH_MIN_TASKALIGN      0
789 #endif
790         int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
791         unsigned long useroffset, usersize;
792
793         /* create a slab on which task_structs can be allocated */
794         task_struct_whitelist(&useroffset, &usersize);
795         task_struct_cachep = kmem_cache_create_usercopy("task_struct",
796                         arch_task_struct_size, align,
797                         SLAB_PANIC|SLAB_ACCOUNT,
798                         useroffset, usersize, NULL);
799 #endif
800
801         /* do the arch specific task caches init */
802         arch_task_cache_init();
803
804         set_max_threads(MAX_THREADS);
805
806         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
807         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
808         init_task.signal->rlim[RLIMIT_SIGPENDING] =
809                 init_task.signal->rlim[RLIMIT_NPROC];
810
811         for (i = 0; i < UCOUNT_COUNTS; i++) {
812                 init_user_ns.ucount_max[i] = max_threads/2;
813         }
814
815 #ifdef CONFIG_VMAP_STACK
816         cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
817                           NULL, free_vm_stack_cache);
818 #endif
819
820         lockdep_init_task(&init_task);
821         uprobes_init();
822 }
823
824 int __weak arch_dup_task_struct(struct task_struct *dst,
825                                                struct task_struct *src)
826 {
827         *dst = *src;
828         return 0;
829 }
830
831 void set_task_stack_end_magic(struct task_struct *tsk)
832 {
833         unsigned long *stackend;
834
835         stackend = end_of_stack(tsk);
836         *stackend = STACK_END_MAGIC;    /* for overflow detection */
837 }
838
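/*
 * Allocate a task_struct and kernel stack for the child and copy the
 * parent's task_struct into it, then re-initialise the stack-related
 * fields that arch_dup_task_struct() clobbers.
 */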
839 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
840 {
841         struct task_struct *tsk;
842         unsigned long *stack;
843         struct vm_struct *stack_vm_area __maybe_unused;
844         int err;
845
846         if (node == NUMA_NO_NODE)
847                 node = tsk_fork_get_node(orig);
848         tsk = alloc_task_struct_node(node);
849         if (!tsk)
850                 return NULL;
851
852         stack = alloc_thread_stack_node(tsk, node);
853         if (!stack)
854                 goto free_tsk;
855
856         if (memcg_charge_kernel_stack(tsk))
857                 goto free_stack;
858
859         stack_vm_area = task_stack_vm_area(tsk);
860
861         err = arch_dup_task_struct(tsk, orig);
862
863         /*
864          * arch_dup_task_struct() clobbers the stack-related fields.  Make
865          * sure they're properly initialized before using any stack-related
866          * functions again.
867          */
868         tsk->stack = stack;
869 #ifdef CONFIG_VMAP_STACK
870         tsk->stack_vm_area = stack_vm_area;
871 #endif
872 #ifdef CONFIG_THREAD_INFO_IN_TASK
873         refcount_set(&tsk->stack_refcount, 1);
874 #endif
875
876         if (err)
877                 goto free_stack;
878
879 #ifdef CONFIG_SECCOMP
880         /*
881          * We must handle setting up seccomp filters once we're under
882          * the sighand lock in case orig has changed between now and
883          * then. Until then, filter must be NULL to avoid messing up
884          * the usage counts on the error path calling free_task.
885          */
886         tsk->seccomp.filter = NULL;
887 #endif
888
889         setup_thread_stack(tsk, orig);
890         clear_user_return_notifier(tsk);
891         clear_tsk_need_resched(tsk);
892         set_task_stack_end_magic(tsk);
893
894 #ifdef CONFIG_STACKPROTECTOR
895         tsk->stack_canary = get_random_canary();
896 #endif
897
898         /*
899          * One for us, one for whoever does the "release_task()" (usually
900          * parent)
901          */
902         refcount_set(&tsk->usage, 2);
903 #ifdef CONFIG_BLK_DEV_IO_TRACE
904         tsk->btrace_seq = 0;
905 #endif
906         tsk->splice_pipe = NULL;
907         tsk->task_frag.page = NULL;
908         tsk->wake_q.next = NULL;
909
910         account_kernel_stack(tsk, 1);
911
912         kcov_task_init(tsk);
913
914 #ifdef CONFIG_FAULT_INJECTION
915         tsk->fail_nth = 0;
916 #endif
917
918 #ifdef CONFIG_BLK_CGROUP
919         tsk->throttle_queue = NULL;
920         tsk->use_memdelay = 0;
921 #endif
922
923 #ifdef CONFIG_MEMCG
924         tsk->active_memcg = NULL;
925 #endif
926         return tsk;
927
928 free_stack:
929         free_thread_stack(tsk);
930 free_tsk:
931         free_task_struct(tsk);
932         return NULL;
933 }
934
935 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
936
937 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
938
939 static int __init coredump_filter_setup(char *s)
940 {
941         default_dump_filter =
942                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
943                 MMF_DUMP_FILTER_MASK;
944         return 1;
945 }
946
947 __setup("coredump_filter=", coredump_filter_setup);
948
949 #include <linux/init_task.h>
950
951 static void mm_init_aio(struct mm_struct *mm)
952 {
953 #ifdef CONFIG_AIO
954         spin_lock_init(&mm->ioctx_lock);
955         mm->ioctx_table = NULL;
956 #endif
957 }
958
959 static __always_inline void mm_clear_owner(struct mm_struct *mm,
960                                            struct task_struct *p)
961 {
962 #ifdef CONFIG_MEMCG
963         if (mm->owner == p)
964                 WRITE_ONCE(mm->owner, NULL);
965 #endif
966 }
967
968 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
969 {
970 #ifdef CONFIG_MEMCG
971         mm->owner = p;
972 #endif
973 }
974
975 static void mm_init_uprobes_state(struct mm_struct *mm)
976 {
977 #ifdef CONFIG_UPROBES
978         mm->uprobes_state.xol_area = NULL;
979 #endif
980 }
981
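/*
 * Initialise a freshly allocated (or memcpy'd) mm_struct, including its
 * page directory and architecture-specific context.  Returns the mm on
 * success; on failure the mm is freed and NULL is returned.
 */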
982 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
983         struct user_namespace *user_ns)
984 {
985         mm->mmap = NULL;
986         mm->mm_rb = RB_ROOT;
987         mm->vmacache_seqnum = 0;
988         atomic_set(&mm->mm_users, 1);
989         atomic_set(&mm->mm_count, 1);
990         init_rwsem(&mm->mmap_sem);
991         INIT_LIST_HEAD(&mm->mmlist);
992         mm->core_state = NULL;
993         mm_pgtables_bytes_init(mm);
994         mm->map_count = 0;
995         mm->locked_vm = 0;
996         atomic64_set(&mm->pinned_vm, 0);
997         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
998         spin_lock_init(&mm->page_table_lock);
999         spin_lock_init(&mm->arg_lock);
1000         mm_init_cpumask(mm);
1001         mm_init_aio(mm);
1002         mm_init_owner(mm, p);
1003         RCU_INIT_POINTER(mm->exe_file, NULL);
1004         mmu_notifier_mm_init(mm);
1005         hmm_mm_init(mm);
1006         init_tlb_flush_pending(mm);
1007 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
1008         mm->pmd_huge_pte = NULL;
1009 #endif
1010         mm_init_uprobes_state(mm);
1011
1012         if (current->mm) {
1013                 mm->flags = current->mm->flags & MMF_INIT_MASK;
1014                 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
1015         } else {
1016                 mm->flags = default_dump_filter;
1017                 mm->def_flags = 0;
1018         }
1019
1020         if (mm_alloc_pgd(mm))
1021                 goto fail_nopgd;
1022
1023         if (init_new_context(p, mm))
1024                 goto fail_nocontext;
1025
1026         mm->user_ns = get_user_ns(user_ns);
1027         return mm;
1028
1029 fail_nocontext:
1030         mm_free_pgd(mm);
1031 fail_nopgd:
1032         free_mm(mm);
1033         return NULL;
1034 }
1035
1036 /*
1037  * Allocate and initialize an mm_struct.
1038  */
1039 struct mm_struct *mm_alloc(void)
1040 {
1041         struct mm_struct *mm;
1042
1043         mm = allocate_mm();
1044         if (!mm)
1045                 return NULL;
1046
1047         memset(mm, 0, sizeof(*mm));
1048         return mm_init(mm, current, current_user_ns());
1049 }
1050
1051 static inline void __mmput(struct mm_struct *mm)
1052 {
1053         VM_BUG_ON(atomic_read(&mm->mm_users));
1054
1055         uprobe_clear_state(mm);
1056         exit_aio(mm);
1057         ksm_exit(mm);
1058         khugepaged_exit(mm); /* must run before exit_mmap */
1059         exit_mmap(mm);
1060         mm_put_huge_zero_page(mm);
1061         set_mm_exe_file(mm, NULL);
1062         if (!list_empty(&mm->mmlist)) {
1063                 spin_lock(&mmlist_lock);
1064                 list_del(&mm->mmlist);
1065                 spin_unlock(&mmlist_lock);
1066         }
1067         if (mm->binfmt)
1068                 module_put(mm->binfmt->module);
1069         mmdrop(mm);
1070 }
1071
1072 /*
1073  * Decrement the use count and release all resources for an mm.
1074  */
1075 void mmput(struct mm_struct *mm)
1076 {
1077         might_sleep();
1078
1079         if (atomic_dec_and_test(&mm->mm_users))
1080                 __mmput(mm);
1081 }
1082 EXPORT_SYMBOL_GPL(mmput);
1083
1084 #ifdef CONFIG_MMU
1085 static void mmput_async_fn(struct work_struct *work)
1086 {
1087         struct mm_struct *mm = container_of(work, struct mm_struct,
1088                                             async_put_work);
1089
1090         __mmput(mm);
1091 }
1092
1093 void mmput_async(struct mm_struct *mm)
1094 {
1095         if (atomic_dec_and_test(&mm->mm_users)) {
1096                 INIT_WORK(&mm->async_put_work, mmput_async_fn);
1097                 schedule_work(&mm->async_put_work);
1098         }
1099 }
1100 #endif
1101
1102 /**
1103  * set_mm_exe_file - change a reference to the mm's executable file
1104  *
1105  * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1106  *
1107  * Main users are mmput() and sys_execve(). Callers prevent concurrent
1108  * invocations: in mmput() nobody alive left, in execve task is single
1109  * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
1110  * mm->exe_file, but does so without using set_mm_exe_file() in order
1111  * to avoid the need for any locks.
1112  */
1113 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1114 {
1115         struct file *old_exe_file;
1116
1117         /*
1118          * It is safe to dereference the exe_file without RCU as
1119          * this function is only called if nobody else can access
1120          * this mm -- see comment above for justification.
1121          */
1122         old_exe_file = rcu_dereference_raw(mm->exe_file);
1123
1124         if (new_exe_file)
1125                 get_file(new_exe_file);
1126         rcu_assign_pointer(mm->exe_file, new_exe_file);
1127         if (old_exe_file)
1128                 fput(old_exe_file);
1129 }
1130
1131 /**
1132  * get_mm_exe_file - acquire a reference to the mm's executable file
1133  *
1134  * Returns %NULL if mm has no associated executable file.
1135  * User must release file via fput().
1136  */
1137 struct file *get_mm_exe_file(struct mm_struct *mm)
1138 {
1139         struct file *exe_file;
1140
1141         rcu_read_lock();
1142         exe_file = rcu_dereference(mm->exe_file);
1143         if (exe_file && !get_file_rcu(exe_file))
1144                 exe_file = NULL;
1145         rcu_read_unlock();
1146         return exe_file;
1147 }
1148 EXPORT_SYMBOL(get_mm_exe_file);
1149
1150 /**
1151  * get_task_exe_file - acquire a reference to the task's executable file
1152  *
1153  * Returns %NULL if task's mm (if any) has no associated executable file or
1154  * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
1155  * User must release file via fput().
1156  */
1157 struct file *get_task_exe_file(struct task_struct *task)
1158 {
1159         struct file *exe_file = NULL;
1160         struct mm_struct *mm;
1161
1162         task_lock(task);
1163         mm = task->mm;
1164         if (mm) {
1165                 if (!(task->flags & PF_KTHREAD))
1166                         exe_file = get_mm_exe_file(mm);
1167         }
1168         task_unlock(task);
1169         return exe_file;
1170 }
1171 EXPORT_SYMBOL(get_task_exe_file);
1172
1173 /**
1174  * get_task_mm - acquire a reference to the task's mm
1175  *
1176  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
1177  * this kernel worker thread has transiently adopted a user mm with use_mm,
1178  * e.g. to do its AIO).  Otherwise returns the mm with its use count bumped
1179  * up; the user must release it via mmput() after use.  Typically used by
1180  * /proc and ptrace.
1181  */
1182 struct mm_struct *get_task_mm(struct task_struct *task)
1183 {
1184         struct mm_struct *mm;
1185
1186         task_lock(task);
1187         mm = task->mm;
1188         if (mm) {
1189                 if (task->flags & PF_KTHREAD)
1190                         mm = NULL;
1191                 else
1192                         mmget(mm);
1193         }
1194         task_unlock(task);
1195         return mm;
1196 }
1197 EXPORT_SYMBOL_GPL(get_task_mm);
1198
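/*
 * Like get_task_mm(), but also checks ptrace access to @task in the given
 * @mode (unless the mm is the caller's own); returns ERR_PTR(-EACCES) if
 * that check fails.
 */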
1199 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1200 {
1201         struct mm_struct *mm;
1202         int err;
1203
1204         err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
1205         if (err)
1206                 return ERR_PTR(err);
1207
1208         mm = get_task_mm(task);
1209         if (mm && mm != current->mm &&
1210                         !ptrace_may_access(task, mode)) {
1211                 mmput(mm);
1212                 mm = ERR_PTR(-EACCES);
1213         }
1214         mutex_unlock(&task->signal->cred_guard_mutex);
1215
1216         return mm;
1217 }
1218
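/*
 * Complete and clear tsk->vfork_done so a parent blocked in
 * wait_for_vfork_done() (or kthread_stop()) can continue.
 */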
1219 static void complete_vfork_done(struct task_struct *tsk)
1220 {
1221         struct completion *vfork;
1222
1223         task_lock(tsk);
1224         vfork = tsk->vfork_done;
1225         if (likely(vfork)) {
1226                 tsk->vfork_done = NULL;
1227                 complete(vfork);
1228         }
1229         task_unlock(tsk);
1230 }
1231
1232 static int wait_for_vfork_done(struct task_struct *child,
1233                                 struct completion *vfork)
1234 {
1235         int killed;
1236
1237         freezer_do_not_count();
1238         cgroup_enter_frozen();
1239         killed = wait_for_completion_killable(vfork);
1240         cgroup_leave_frozen(false);
1241         freezer_count();
1242
1243         if (killed) {
1244                 task_lock(child);
1245                 child->vfork_done = NULL;
1246                 task_unlock(child);
1247         }
1248
1249         put_task_struct(child);
1250         return killed;
1251 }
1252
1253 /* Please note the differences between mmput and mm_release.
1254  * mmput is called whenever we stop holding onto a mm_struct,
1255  * error success whatever.
1256  *
1257  * mm_release is called after a mm_struct has been removed
1258  * from the current process.
1259  *
1260  * This difference is important for error handling, when we
1261  * only half set up a mm_struct for a new process and need to restore
1262  * the old one.  Because we mmput the new mm_struct before
1263  * restoring the old one. . .
1264  * Eric Biederman 10 January 1998
1265  */
1266 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1267 {
1268         /* Get rid of any futexes when releasing the mm */
1269 #ifdef CONFIG_FUTEX
1270         if (unlikely(tsk->robust_list)) {
1271                 exit_robust_list(tsk);
1272                 tsk->robust_list = NULL;
1273         }
1274 #ifdef CONFIG_COMPAT
1275         if (unlikely(tsk->compat_robust_list)) {
1276                 compat_exit_robust_list(tsk);
1277                 tsk->compat_robust_list = NULL;
1278         }
1279 #endif
1280         if (unlikely(!list_empty(&tsk->pi_state_list)))
1281                 exit_pi_state_list(tsk);
1282 #endif
1283
1284         uprobe_free_utask(tsk);
1285
1286         /* Get rid of any cached register state */
1287         deactivate_mm(tsk, mm);
1288
1289         /*
1290          * Signal userspace if we're not exiting with a core dump
1291          * because we want to leave the value intact for debugging
1292          * purposes.
1293          */
1294         if (tsk->clear_child_tid) {
1295                 if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
1296                     atomic_read(&mm->mm_users) > 1) {
1297                         /*
1298                          * We don't check the error code - if userspace has
1299                          * not set up a proper pointer then tough luck.
1300                          */
1301                         put_user(0, tsk->clear_child_tid);
1302                         do_futex(tsk->clear_child_tid, FUTEX_WAKE,
1303                                         1, NULL, NULL, 0, 0);
1304                 }
1305                 tsk->clear_child_tid = NULL;
1306         }
1307
1308         /*
1309          * All done, finally we can wake up parent and return this mm to him.
1310          * Also kthread_stop() uses this completion for synchronization.
1311          */
1312         if (tsk->vfork_done)
1313                 complete_vfork_done(tsk);
1314 }
1315
1316 /**
1317  * dup_mm() - duplicates an existing mm structure
1318  * @tsk: the task_struct with which the new mm will be associated.
1319  * @oldmm: the mm to duplicate.
1320  *
1321  * Allocates a new mm structure and duplicates the provided @oldmm structure
1322  * content into it.
1323  *
1324  * Return: the duplicated mm or NULL on failure.
1325  */
1326 static struct mm_struct *dup_mm(struct task_struct *tsk,
1327                                 struct mm_struct *oldmm)
1328 {
1329         struct mm_struct *mm;
1330         int err;
1331
1332         mm = allocate_mm();
1333         if (!mm)
1334                 goto fail_nomem;
1335
1336         memcpy(mm, oldmm, sizeof(*mm));
1337
1338         if (!mm_init(mm, tsk, mm->user_ns))
1339                 goto fail_nomem;
1340
1341         err = dup_mmap(mm, oldmm);
1342         if (err)
1343                 goto free_pt;
1344
1345         mm->hiwater_rss = get_mm_rss(mm);
1346         mm->hiwater_vm = mm->total_vm;
1347
1348         if (mm->binfmt && !try_module_get(mm->binfmt->module))
1349                 goto free_pt;
1350
1351         return mm;
1352
1353 free_pt:
1354         /* don't put binfmt in mmput, we haven't got module yet */
1355         mm->binfmt = NULL;
1356         mm_init_owner(mm, NULL);
1357         mmput(mm);
1358
1359 fail_nomem:
1360         return NULL;
1361 }
1362
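/*
 * Set up the child's mm for fork(): kernel threads keep none, CLONE_VM
 * shares the parent's mm, and otherwise the parent's mm is duplicated
 * via dup_mm().
 */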
1363 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
1364 {
1365         struct mm_struct *mm, *oldmm;
1366         int retval;
1367
1368         tsk->min_flt = tsk->maj_flt = 0;
1369         tsk->nvcsw = tsk->nivcsw = 0;
1370 #ifdef CONFIG_DETECT_HUNG_TASK
1371         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1372         tsk->last_switch_time = 0;
1373 #endif
1374
1375         tsk->mm = NULL;
1376         tsk->active_mm = NULL;
1377
1378         /*
1379          * Are we cloning a kernel thread?
1380          *
1381          * We need to steal an active VM for that.
1382          */
1383         oldmm = current->mm;
1384         if (!oldmm)
1385                 return 0;
1386
1387         /* initialize the new vmacache entries */
1388         vmacache_flush(tsk);
1389
1390         if (clone_flags & CLONE_VM) {
1391                 mmget(oldmm);
1392                 mm = oldmm;
1393                 goto good_mm;
1394         }
1395
1396         retval = -ENOMEM;
1397         mm = dup_mm(tsk, current->mm);
1398         if (!mm)
1399                 goto fail_nomem;
1400
1401 good_mm:
1402         tsk->mm = mm;
1403         tsk->active_mm = mm;
1404         return 0;
1405
1406 fail_nomem:
1407         return retval;
1408 }
1409
1410 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1411 {
1412         struct fs_struct *fs = current->fs;
1413         if (clone_flags & CLONE_FS) {
1414                 /* tsk->fs is already what we want */
1415                 spin_lock(&fs->lock);
1416                 if (fs->in_exec) {
1417                         spin_unlock(&fs->lock);
1418                         return -EAGAIN;
1419                 }
1420                 fs->users++;
1421                 spin_unlock(&fs->lock);
1422                 return 0;
1423         }
1424         tsk->fs = copy_fs_struct(fs);
1425         if (!tsk->fs)
1426                 return -ENOMEM;
1427         return 0;
1428 }
1429
1430 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
1431 {
1432         struct files_struct *oldf, *newf;
1433         int error = 0;
1434
1435         /*
1436          * A background process may not have any files ...
1437          */
1438         oldf = current->files;
1439         if (!oldf)
1440                 goto out;
1441
1442         if (clone_flags & CLONE_FILES) {
1443                 atomic_inc(&oldf->count);
1444                 goto out;
1445         }
1446
1447         newf = dup_fd(oldf, &error);
1448         if (!newf)
1449                 goto out;
1450
1451         tsk->files = newf;
1452         error = 0;
1453 out:
1454         return error;
1455 }
1456
1457 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
1458 {
1459 #ifdef CONFIG_BLOCK
1460         struct io_context *ioc = current->io_context;
1461         struct io_context *new_ioc;
1462
1463         if (!ioc)
1464                 return 0;
1465         /*
1466          * Share io context with parent, if CLONE_IO is set
1467          */
1468         if (clone_flags & CLONE_IO) {
1469                 ioc_task_link(ioc);
1470                 tsk->io_context = ioc;
1471         } else if (ioprio_valid(ioc->ioprio)) {
1472                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
1473                 if (unlikely(!new_ioc))
1474                         return -ENOMEM;
1475
1476                 new_ioc->ioprio = ioc->ioprio;
1477                 put_io_context(new_ioc);
1478         }
1479 #endif
1480         return 0;
1481 }
1482
1483 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1484 {
1485         struct sighand_struct *sig;
1486
1487         if (clone_flags & CLONE_SIGHAND) {
1488                 refcount_inc(&current->sighand->count);
1489                 return 0;
1490         }
1491         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1492         rcu_assign_pointer(tsk->sighand, sig);
1493         if (!sig)
1494                 return -ENOMEM;
1495
1496         refcount_set(&sig->count, 1);
1497         spin_lock_irq(&current->sighand->siglock);
1498         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1499         spin_unlock_irq(&current->sighand->siglock);
1500         return 0;
1501 }
1502
1503 void __cleanup_sighand(struct sighand_struct *sighand)
1504 {
1505         if (refcount_dec_and_test(&sighand->count)) {
1506                 signalfd_cleanup(sighand);
1507                 /*
1508                  * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1509                  * without an RCU grace period, see __lock_task_sighand().
1510                  */
1511                 kmem_cache_free(sighand_cachep, sighand);
1512         }
1513 }
1514
1515 #ifdef CONFIG_POSIX_TIMERS
1516 /*
1517  * Initialize POSIX timer handling for a thread group.
1518  */
1519 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1520 {
1521         unsigned long cpu_limit;
1522
1523         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1524         if (cpu_limit != RLIM_INFINITY) {
1525                 sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
1526                 sig->cputimer.running = true;
1527         }
1528
1529         /* The timer lists. */
1530         INIT_LIST_HEAD(&sig->cpu_timers[0]);
1531         INIT_LIST_HEAD(&sig->cpu_timers[1]);
1532         INIT_LIST_HEAD(&sig->cpu_timers[2]);
1533 }
1534 #else
1535 static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
1536 #endif
1537
1538 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1539 {
1540         struct signal_struct *sig;
1541
1542         if (clone_flags & CLONE_THREAD)
1543                 return 0;
1544
1545         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1546         tsk->signal = sig;
1547         if (!sig)
1548                 return -ENOMEM;
1549
1550         sig->nr_threads = 1;
1551         atomic_set(&sig->live, 1);
1552         refcount_set(&sig->sigcnt, 1);
1553
1554         /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1555         sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1556         tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1557
1558         init_waitqueue_head(&sig->wait_chldexit);
1559         sig->curr_target = tsk;
1560         init_sigpending(&sig->shared_pending);
1561         INIT_HLIST_HEAD(&sig->multiprocess);
1562         seqlock_init(&sig->stats_lock);
1563         prev_cputime_init(&sig->prev_cputime);
1564
1565 #ifdef CONFIG_POSIX_TIMERS
1566         INIT_LIST_HEAD(&sig->posix_timers);
1567         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1568         sig->real_timer.function = it_real_fn;
1569 #endif
1570
1571         task_lock(current->group_leader);
1572         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1573         task_unlock(current->group_leader);
1574
1575         posix_cpu_timers_init_group(sig);
1576
1577         tty_audit_fork(sig);
1578         sched_autogroup_fork(sig);
1579
1580         sig->oom_score_adj = current->signal->oom_score_adj;
1581         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1582
1583         mutex_init(&sig->cred_guard_mutex);
1584
1585         return 0;
1586 }
1587
1588 static void copy_seccomp(struct task_struct *p)
1589 {
1590 #ifdef CONFIG_SECCOMP
1591         /*
1592          * Must be called with sighand->lock held, which is common to
1593          * all threads in the group. Holding cred_guard_mutex is not
1594          * needed because this new task is not yet running and cannot
1595          * be racing exec.
1596          */
1597         assert_spin_locked(&current->sighand->siglock);
1598
1599         /* Ref-count the new filter user, and assign it. */
1600         get_seccomp_filter(current);
1601         p->seccomp = current->seccomp;
1602
1603         /*
1604          * Explicitly enable no_new_privs here in case it got set
1605          * between the task_struct being duplicated and holding the
1606          * sighand lock. The seccomp state and nnp must be in sync.
1607          */
1608         if (task_no_new_privs(current))
1609                 task_set_no_new_privs(p);
1610
1611         /*
1612          * If the parent gained a seccomp mode after copying thread
1613          * flags but before we held the sighand lock, we have
1614          * to manually enable the seccomp thread flag here.
1615          */
1616         if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1617                 set_tsk_thread_flag(p, TIF_SECCOMP);
1618 #endif
1619 }
1620
1621 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1622 {
1623         current->clear_child_tid = tidptr;
1624
1625         return task_pid_vnr(current);
1626 }
1627
1628 static void rt_mutex_init_task(struct task_struct *p)
1629 {
1630         raw_spin_lock_init(&p->pi_lock);
1631 #ifdef CONFIG_RT_MUTEXES
1632         p->pi_waiters = RB_ROOT_CACHED;
1633         p->pi_top_task = NULL;
1634         p->pi_blocked_on = NULL;
1635 #endif
1636 }
1637
1638 #ifdef CONFIG_POSIX_TIMERS
1639 /*
1640  * Initialize POSIX timer handling for a single task.
1641  */
1642 static void posix_cpu_timers_init(struct task_struct *tsk)
1643 {
1644         tsk->cputime_expires.prof_exp = 0;
1645         tsk->cputime_expires.virt_exp = 0;
1646         tsk->cputime_expires.sched_exp = 0;
1647         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1648         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1649         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1650 }
1651 #else
1652 static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
1653 #endif
1654
1655 static inline void init_task_pid_links(struct task_struct *task)
1656 {
1657         enum pid_type type;
1658
1659         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1660                 INIT_HLIST_NODE(&task->pid_links[type]);
1661         }
1662 }
1663
1664 static inline void
1665 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1666 {
1667         if (type == PIDTYPE_PID)
1668                 task->thread_pid = pid;
1669         else
1670                 task->signal->pids[type] = pid;
1671 }
1672
1673 static inline void rcu_copy_process(struct task_struct *p)
1674 {
1675 #ifdef CONFIG_PREEMPT_RCU
1676         p->rcu_read_lock_nesting = 0;
1677         p->rcu_read_unlock_special.s = 0;
1678         p->rcu_blocked_node = NULL;
1679         INIT_LIST_HEAD(&p->rcu_node_entry);
1680 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1681 #ifdef CONFIG_TASKS_RCU
1682         p->rcu_tasks_holdout = false;
1683         INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1684         p->rcu_tasks_idle_cpu = -1;
1685 #endif /* #ifdef CONFIG_TASKS_RCU */
1686 }
1687
1688 static int pidfd_release(struct inode *inode, struct file *file)
1689 {
1690         struct pid *pid = file->private_data;
1691
1692         file->private_data = NULL;
1693         put_pid(pid);
1694         return 0;
1695 }
1696
1697 #ifdef CONFIG_PROC_FS
1698 static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
1699 {
1700         struct pid_namespace *ns = proc_pid_ns(file_inode(m->file));
1701         struct pid *pid = f->private_data;
1702
1703         seq_put_decimal_ull(m, "Pid:\t", pid_nr_ns(pid, ns));
1704         seq_putc(m, '\n');
1705 }
1706 #endif
1707
1708 const struct file_operations pidfd_fops = {
1709         .release = pidfd_release,
1710 #ifdef CONFIG_PROC_FS
1711         .show_fdinfo = pidfd_show_fdinfo,
1712 #endif
1713 };
1714
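/*
 * Editorial sketch: a pidfd obtained via clone(CLONE_PIDFD) uses these
 * file operations. With CONFIG_PROC_FS its fdinfo shows the pid as seen
 * from the proc mount's pid namespace, e.g. (hypothetical output):
 *
 *	$ cat /proc/self/fdinfo/<pidfd>
 *	Pid:	1234
 */
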
1715 static void __delayed_free_task(struct rcu_head *rhp)
1716 {
1717         struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
1718
1719         free_task(tsk);
1720 }
1721
1722 static __always_inline void delayed_free_task(struct task_struct *tsk)
1723 {
1724         if (IS_ENABLED(CONFIG_MEMCG))
1725                 call_rcu(&tsk->rcu, __delayed_free_task);
1726         else
1727                 free_task(tsk);
1728 }
1729
1730 /*
1731  * This creates a new process as a copy of the old one,
1732  * but does not actually start it yet.
1733  *
1734  * It copies the registers, and all the appropriate
1735  * parts of the process environment (as per the clone
1736  * flags). The actual kick-off is left to the caller.
1737  */
1738 static __latent_entropy struct task_struct *copy_process(
1739                                         unsigned long clone_flags,
1740                                         unsigned long stack_start,
1741                                         unsigned long stack_size,
1742                                         int __user *parent_tidptr,
1743                                         int __user *child_tidptr,
1744                                         struct pid *pid,
1745                                         int trace,
1746                                         unsigned long tls,
1747                                         int node)
1748 {
1749         int pidfd = -1, retval;
1750         struct task_struct *p;
1751         struct multiprocess_signals delayed;
1752         struct file *pidfile = NULL;
1753
1754         /*
1755          * Don't allow sharing the root directory with processes in a different
1756          * namespace
1757          */
1758         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1759                 return ERR_PTR(-EINVAL);
1760
1761         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1762                 return ERR_PTR(-EINVAL);
1763
1764         /*
1765          * Thread groups must share signals as well, and detached threads
1766          * can only be started up within the thread group.
1767          */
1768         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1769                 return ERR_PTR(-EINVAL);
1770
1771         /*
1772          * Shared signal handlers imply shared VM. By way of the above,
1773          * thread groups also imply shared VM. Blocking this case allows
1774          * for various simplifications in other code.
1775          */
1776         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1777                 return ERR_PTR(-EINVAL);
1778
1779         /*
1780          * Siblings of global init remain as zombies on exit since they are
1781          * not reaped by their parent (swapper). To solve this and to avoid
1782          * multi-rooted process trees, prevent global and container-inits
1783          * from creating siblings.
1784          */
1785         if ((clone_flags & CLONE_PARENT) &&
1786                                 current->signal->flags & SIGNAL_UNKILLABLE)
1787                 return ERR_PTR(-EINVAL);
1788
1789         /*
1790          * If the new process will be in a different pid or user namespace
1791          * do not allow it to share a thread group with the forking task.
1792          */
1793         if (clone_flags & CLONE_THREAD) {
1794                 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1795                     (task_active_pid_ns(current) !=
1796                                 current->nsproxy->pid_ns_for_children))
1797                         return ERR_PTR(-EINVAL);
1798         }
1799
1800         if (clone_flags & CLONE_PIDFD) {
1801                 /*
1802                  * - CLONE_PARENT_SETTID is useless for pidfds and also
1803                  *   parent_tidptr is used to return pidfds.
1804                  * - CLONE_DETACHED is blocked so that we can potentially
1805                  *   reuse it later for CLONE_PIDFD.
1806                  * - CLONE_THREAD is blocked until someone really needs it.
1807                  */
1808                 if (clone_flags &
1809                     (CLONE_DETACHED | CLONE_PARENT_SETTID | CLONE_THREAD))
1810                         return ERR_PTR(-EINVAL);
1811         }
1812
1813         /*
1814          * Force any signals received before this point to be delivered
1815          * before the fork happens.  Collect up signals sent to multiple
1816          * processes that happen during the fork and delay them so that
1817          * they appear to happen after the fork.
1818          */
1819         sigemptyset(&delayed.signal);
1820         INIT_HLIST_NODE(&delayed.node);
1821
1822         spin_lock_irq(&current->sighand->siglock);
1823         if (!(clone_flags & CLONE_THREAD))
1824                 hlist_add_head(&delayed.node, &current->signal->multiprocess);
1825         recalc_sigpending();
1826         spin_unlock_irq(&current->sighand->siglock);
1827         retval = -ERESTARTNOINTR;
1828         if (signal_pending(current))
1829                 goto fork_out;
1830
1831         retval = -ENOMEM;
1832         p = dup_task_struct(current, node);
1833         if (!p)
1834                 goto fork_out;
1835
1836         /*
1837          * This _must_ happen before we call free_task(), i.e. before we jump
1838          * to any of the bad_fork_* labels. This is to avoid freeing
1839          * p->set_child_tid which is (ab)used as a kthread's data pointer for
1840          * kernel threads (PF_KTHREAD).
1841          */
1842         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1843         /*
1844          * Clear TID on mm_release()?
1845          */
1846         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1847
1848         ftrace_graph_init_task(p);
1849
1850         rt_mutex_init_task(p);
1851
1852 #ifdef CONFIG_PROVE_LOCKING
1853         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1854         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1855 #endif
1856         retval = -EAGAIN;
1857         if (atomic_read(&p->real_cred->user->processes) >=
1858                         task_rlimit(p, RLIMIT_NPROC)) {
1859                 if (p->real_cred->user != INIT_USER &&
1860                     !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
1861                         goto bad_fork_free;
1862         }
1863         current->flags &= ~PF_NPROC_EXCEEDED;
1864
1865         retval = copy_creds(p, clone_flags);
1866         if (retval < 0)
1867                 goto bad_fork_free;
1868
1869         /*
1870          * If multiple threads are within copy_process(), then this check
1871          * triggers too late. This doesn't hurt; the check is only there
1872          * to stop root fork bombs.
1873          */
1874         retval = -EAGAIN;
1875         if (nr_threads >= max_threads)
1876                 goto bad_fork_cleanup_count;
1877
1878         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1879         p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
1880         p->flags |= PF_FORKNOEXEC;
1881         INIT_LIST_HEAD(&p->children);
1882         INIT_LIST_HEAD(&p->sibling);
1883         rcu_copy_process(p);
1884         p->vfork_done = NULL;
1885         spin_lock_init(&p->alloc_lock);
1886
1887         init_sigpending(&p->pending);
1888
1889         p->utime = p->stime = p->gtime = 0;
1890 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1891         p->utimescaled = p->stimescaled = 0;
1892 #endif
1893         prev_cputime_init(&p->prev_cputime);
1894
1895 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1896         seqcount_init(&p->vtime.seqcount);
1897         p->vtime.starttime = 0;
1898         p->vtime.state = VTIME_INACTIVE;
1899 #endif
1900
1901 #if defined(SPLIT_RSS_COUNTING)
1902         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1903 #endif
1904
1905         p->default_timer_slack_ns = current->timer_slack_ns;
1906
1907 #ifdef CONFIG_PSI
1908         p->psi_flags = 0;
1909 #endif
1910
1911         task_io_accounting_init(&p->ioac);
1912         acct_clear_integrals(p);
1913
1914         posix_cpu_timers_init(p);
1915
1916         p->io_context = NULL;
1917         audit_set_context(p, NULL);
1918         cgroup_fork(p);
1919 #ifdef CONFIG_NUMA
1920         p->mempolicy = mpol_dup(p->mempolicy);
1921         if (IS_ERR(p->mempolicy)) {
1922                 retval = PTR_ERR(p->mempolicy);
1923                 p->mempolicy = NULL;
1924                 goto bad_fork_cleanup_threadgroup_lock;
1925         }
1926 #endif
1927 #ifdef CONFIG_CPUSETS
1928         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1929         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1930         seqcount_init(&p->mems_allowed_seq);
1931 #endif
1932 #ifdef CONFIG_TRACE_IRQFLAGS
1933         p->irq_events = 0;
1934         p->hardirqs_enabled = 0;
1935         p->hardirq_enable_ip = 0;
1936         p->hardirq_enable_event = 0;
1937         p->hardirq_disable_ip = _THIS_IP_;
1938         p->hardirq_disable_event = 0;
1939         p->softirqs_enabled = 1;
1940         p->softirq_enable_ip = _THIS_IP_;
1941         p->softirq_enable_event = 0;
1942         p->softirq_disable_ip = 0;
1943         p->softirq_disable_event = 0;
1944         p->hardirq_context = 0;
1945         p->softirq_context = 0;
1946 #endif
1947
1948         p->pagefault_disabled = 0;
1949
1950 #ifdef CONFIG_LOCKDEP
1951         p->lockdep_depth = 0; /* no locks held yet */
1952         p->curr_chain_key = 0;
1953         p->lockdep_recursion = 0;
1954         lockdep_init_task(p);
1955 #endif
1956
1957 #ifdef CONFIG_DEBUG_MUTEXES
1958         p->blocked_on = NULL; /* not blocked yet */
1959 #endif
1960 #ifdef CONFIG_BCACHE
1961         p->sequential_io        = 0;
1962         p->sequential_io_avg    = 0;
1963 #endif
1964
1965         /* Perform scheduler related setup. Assign this task to a CPU. */
1966         retval = sched_fork(clone_flags, p);
1967         if (retval)
1968                 goto bad_fork_cleanup_policy;
1969
1970         retval = perf_event_init_task(p);
1971         if (retval)
1972                 goto bad_fork_cleanup_policy;
1973         retval = audit_alloc(p);
1974         if (retval)
1975                 goto bad_fork_cleanup_perf;
1976         /* copy all the process information */
1977         shm_init_task(p);
1978         retval = security_task_alloc(p, clone_flags);
1979         if (retval)
1980                 goto bad_fork_cleanup_audit;
1981         retval = copy_semundo(clone_flags, p);
1982         if (retval)
1983                 goto bad_fork_cleanup_security;
1984         retval = copy_files(clone_flags, p);
1985         if (retval)
1986                 goto bad_fork_cleanup_semundo;
1987         retval = copy_fs(clone_flags, p);
1988         if (retval)
1989                 goto bad_fork_cleanup_files;
1990         retval = copy_sighand(clone_flags, p);
1991         if (retval)
1992                 goto bad_fork_cleanup_fs;
1993         retval = copy_signal(clone_flags, p);
1994         if (retval)
1995                 goto bad_fork_cleanup_sighand;
1996         retval = copy_mm(clone_flags, p);
1997         if (retval)
1998                 goto bad_fork_cleanup_signal;
1999         retval = copy_namespaces(clone_flags, p);
2000         if (retval)
2001                 goto bad_fork_cleanup_mm;
2002         retval = copy_io(clone_flags, p);
2003         if (retval)
2004                 goto bad_fork_cleanup_namespaces;
2005         retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
2006         if (retval)
2007                 goto bad_fork_cleanup_io;
2008
2009         stackleak_task_init(p);
2010
2011         if (pid != &init_struct_pid) {
2012                 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
2013                 if (IS_ERR(pid)) {
2014                         retval = PTR_ERR(pid);
2015                         goto bad_fork_cleanup_thread;
2016                 }
2017         }
2018
2019         /*
2020          * This has to happen after we've potentially unshared the file
2021          * descriptor table (so that the pidfd doesn't leak into the child
2022          * if the fd table isn't shared).
2023          */
2024         if (clone_flags & CLONE_PIDFD) {
2025                 retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
2026                 if (retval < 0)
2027                         goto bad_fork_free_pid;
2028
2029                 pidfd = retval;
2030
2031                 pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
2032                                               O_RDWR | O_CLOEXEC);
2033                 if (IS_ERR(pidfile)) {
2034                         put_unused_fd(pidfd);
2035                         goto bad_fork_free_pid;
2036                 }
2037                 get_pid(pid);   /* held by pidfile now */
2038
2039                 retval = put_user(pidfd, parent_tidptr);
2040                 if (retval)
2041                         goto bad_fork_put_pidfd;
2042         }
2043
2044 #ifdef CONFIG_BLOCK
2045         p->plug = NULL;
2046 #endif
2047 #ifdef CONFIG_FUTEX
2048         p->robust_list = NULL;
2049 #ifdef CONFIG_COMPAT
2050         p->compat_robust_list = NULL;
2051 #endif
2052         INIT_LIST_HEAD(&p->pi_state_list);
2053         p->pi_state_cache = NULL;
2054 #endif
2055         /*
2056          * sigaltstack should be cleared when sharing the same VM
2057          */
2058         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
2059                 sas_ss_reset(p);
2060
2061         /*
2062          * Syscall tracing and stepping should be turned off in the
2063          * child regardless of CLONE_PTRACE.
2064          */
2065         user_disable_single_step(p);
2066         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
2067 #ifdef TIF_SYSCALL_EMU
2068         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
2069 #endif
2070         clear_tsk_latency_tracing(p);
2071
2072         /* ok, now we should be set up.. */
2073         p->pid = pid_nr(pid);
2074         if (clone_flags & CLONE_THREAD) {
2075                 p->exit_signal = -1;
2076                 p->group_leader = current->group_leader;
2077                 p->tgid = current->tgid;
2078         } else {
2079                 if (clone_flags & CLONE_PARENT)
2080                         p->exit_signal = current->group_leader->exit_signal;
2081                 else
2082                         p->exit_signal = (clone_flags & CSIGNAL);
2083                 p->group_leader = p;
2084                 p->tgid = p->pid;
2085         }
2086
2087         p->nr_dirtied = 0;
2088         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
2089         p->dirty_paused_when = 0;
2090
2091         p->pdeath_signal = 0;
2092         INIT_LIST_HEAD(&p->thread_group);
2093         p->task_works = NULL;
2094
2095         cgroup_threadgroup_change_begin(current);
2096         /*
2097          * Ensure that the cgroup subsystem policies allow the new process to be
2098          * forked. It should be noted that the new process's css_set can be changed
2099          * between here and cgroup_post_fork() if an organisation operation is in
2100          * progress.
2101          */
2102         retval = cgroup_can_fork(p);
2103         if (retval)
2104                 goto bad_fork_cgroup_threadgroup_change_end;
2105
2106         /*
2107          * From this point on we must avoid any synchronous user-space
2108          * communication until we take the tasklist-lock. In particular, we do
2109          * not want user-space to be able to predict the process start-time by
2110          * stalling fork(2) after we recorded the start_time but before it is
2111          * visible to the system.
2112          */
2113
2114         p->start_time = ktime_get_ns();
2115         p->real_start_time = ktime_get_boot_ns();
2116
2117         /*
2118          * Make it visible to the rest of the system, but don't wake it up yet.
2119          * Need tasklist lock for parent etc handling!
2120          */
2121         write_lock_irq(&tasklist_lock);
2122
2123         /* CLONE_PARENT re-uses the old parent */
2124         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
2125                 p->real_parent = current->real_parent;
2126                 p->parent_exec_id = current->parent_exec_id;
2127         } else {
2128                 p->real_parent = current;
2129                 p->parent_exec_id = current->self_exec_id;
2130         }
2131
2132         klp_copy_process(p);
2133
2134         spin_lock(&current->sighand->siglock);
2135
2136         /*
2137          * Copy seccomp details explicitly here, in case they were changed
2138          * before holding sighand lock.
2139          */
2140         copy_seccomp(p);
2141
2142         rseq_fork(p, clone_flags);
2143
2144         /* Don't start children in a dying pid namespace */
2145         if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
2146                 retval = -ENOMEM;
2147                 goto bad_fork_cancel_cgroup;
2148         }
2149
2150         /* Let kill terminate clone/fork in the middle */
2151         if (fatal_signal_pending(current)) {
2152                 retval = -EINTR;
2153                 goto bad_fork_cancel_cgroup;
2154         }
2155
2156         /* past the last point of failure */
2157         if (pidfile)
2158                 fd_install(pidfd, pidfile);
2159
2160         init_task_pid_links(p);
2161         if (likely(p->pid)) {
2162                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
2163
2164                 init_task_pid(p, PIDTYPE_PID, pid);
2165                 if (thread_group_leader(p)) {
2166                         init_task_pid(p, PIDTYPE_TGID, pid);
2167                         init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
2168                         init_task_pid(p, PIDTYPE_SID, task_session(current));
2169
2170                         if (is_child_reaper(pid)) {
2171                                 ns_of_pid(pid)->child_reaper = p;
2172                                 p->signal->flags |= SIGNAL_UNKILLABLE;
2173                         }
2174                         p->signal->shared_pending.signal = delayed.signal;
2175                         p->signal->tty = tty_kref_get(current->signal->tty);
2176                         /*
2177                          * Inherit the has_child_subreaper flag under the same
2178                          * tasklist_lock that adds the child to the process tree,
2179                          * for the propagate_has_child_subreaper optimization.
2180                          */
2181                         p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
2182                                                          p->real_parent->signal->is_child_subreaper;
2183                         list_add_tail(&p->sibling, &p->real_parent->children);
2184                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
2185                         attach_pid(p, PIDTYPE_TGID);
2186                         attach_pid(p, PIDTYPE_PGID);
2187                         attach_pid(p, PIDTYPE_SID);
2188                         __this_cpu_inc(process_counts);
2189                 } else {
2190                         current->signal->nr_threads++;
2191                         atomic_inc(&current->signal->live);
2192                         refcount_inc(&current->signal->sigcnt);
2193                         task_join_group_stop(p);
2194                         list_add_tail_rcu(&p->thread_group,
2195                                           &p->group_leader->thread_group);
2196                         list_add_tail_rcu(&p->thread_node,
2197                                           &p->signal->thread_head);
2198                 }
2199                 attach_pid(p, PIDTYPE_PID);
2200                 nr_threads++;
2201         }
2202         total_forks++;
2203         hlist_del_init(&delayed.node);
2204         spin_unlock(&current->sighand->siglock);
2205         syscall_tracepoint_update(p);
2206         write_unlock_irq(&tasklist_lock);
2207
2208         proc_fork_connector(p);
2209         cgroup_post_fork(p);
2210         cgroup_threadgroup_change_end(current);
2211         perf_event_fork(p);
2212
2213         trace_task_newtask(p, clone_flags);
2214         uprobe_copy_process(p, clone_flags);
2215
2216         return p;
2217
2218 bad_fork_cancel_cgroup:
2219         spin_unlock(&current->sighand->siglock);
2220         write_unlock_irq(&tasklist_lock);
2221         cgroup_cancel_fork(p);
2222 bad_fork_cgroup_threadgroup_change_end:
2223         cgroup_threadgroup_change_end(current);
2224 bad_fork_put_pidfd:
2225         if (clone_flags & CLONE_PIDFD) {
2226                 fput(pidfile);
2227                 put_unused_fd(pidfd);
2228         }
2229 bad_fork_free_pid:
2230         if (pid != &init_struct_pid)
2231                 free_pid(pid);
2232 bad_fork_cleanup_thread:
2233         exit_thread(p);
2234 bad_fork_cleanup_io:
2235         if (p->io_context)
2236                 exit_io_context(p);
2237 bad_fork_cleanup_namespaces:
2238         exit_task_namespaces(p);
2239 bad_fork_cleanup_mm:
2240         if (p->mm) {
2241                 mm_clear_owner(p->mm, p);
2242                 mmput(p->mm);
2243         }
2244 bad_fork_cleanup_signal:
2245         if (!(clone_flags & CLONE_THREAD))
2246                 free_signal_struct(p->signal);
2247 bad_fork_cleanup_sighand:
2248         __cleanup_sighand(p->sighand);
2249 bad_fork_cleanup_fs:
2250         exit_fs(p); /* blocking */
2251 bad_fork_cleanup_files:
2252         exit_files(p); /* blocking */
2253 bad_fork_cleanup_semundo:
2254         exit_sem(p);
2255 bad_fork_cleanup_security:
2256         security_task_free(p);
2257 bad_fork_cleanup_audit:
2258         audit_free(p);
2259 bad_fork_cleanup_perf:
2260         perf_event_free_task(p);
2261 bad_fork_cleanup_policy:
2262         lockdep_free_task(p);
2263 #ifdef CONFIG_NUMA
2264         mpol_put(p->mempolicy);
2265 bad_fork_cleanup_threadgroup_lock:
2266 #endif
2267         delayacct_tsk_free(p);
2268 bad_fork_cleanup_count:
2269         atomic_dec(&p->cred->user->processes);
2270         exit_creds(p);
2271 bad_fork_free:
2272         p->state = TASK_DEAD;
2273         put_task_stack(p);
2274         delayed_free_task(p);
2275 fork_out:
2276         spin_lock_irq(&current->sighand->siglock);
2277         hlist_del_init(&delayed.node);
2278         spin_unlock_irq(&current->sighand->siglock);
2279         return ERR_PTR(retval);
2280 }
2281
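/*
 * Editorial note: the bad_fork_* labels in copy_process() unwind in the
 * reverse order of the corresponding copy_*()/alloc calls, so a failure
 * at any step only releases state that was already set up.
 */
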
2282 static inline void init_idle_pids(struct task_struct *idle)
2283 {
2284         enum pid_type type;
2285
2286         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2287                 INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
2288                 init_task_pid(idle, type, &init_struct_pid);
2289         }
2290 }
2291
2292 struct task_struct *fork_idle(int cpu)
2293 {
2294         struct task_struct *task;
2295         task = copy_process(CLONE_VM, 0, 0, NULL, NULL, &init_struct_pid, 0, 0,
2296                             cpu_to_node(cpu));
2297         if (!IS_ERR(task)) {
2298                 init_idle_pids(task);
2299                 init_idle(task, cpu);
2300         }
2301
2302         return task;
2303 }
2304
2305 struct mm_struct *copy_init_mm(void)
2306 {
2307         return dup_mm(NULL, &init_mm);
2308 }
2309
2310 /*
2311  *  Ok, this is the main fork-routine.
2312  *
2313  * It copies the process, and if successful kick-starts
2314  * it and waits for it to finish using the VM if required.
2315  */
2316 long _do_fork(unsigned long clone_flags,
2317               unsigned long stack_start,
2318               unsigned long stack_size,
2319               int __user *parent_tidptr,
2320               int __user *child_tidptr,
2321               unsigned long tls)
2322 {
2323         struct completion vfork;
2324         struct pid *pid;
2325         struct task_struct *p;
2326         int trace = 0;
2327         long nr;
2328
2329         /*
2330          * Determine whether and which event to report to ptracer.  When
2331          * called from kernel_thread or CLONE_UNTRACED is explicitly
2332          * requested, no event is reported; otherwise, report if the event
2333          * for the type of forking is enabled.
2334          */
2335         if (!(clone_flags & CLONE_UNTRACED)) {
2336                 if (clone_flags & CLONE_VFORK)
2337                         trace = PTRACE_EVENT_VFORK;
2338                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
2339                         trace = PTRACE_EVENT_CLONE;
2340                 else
2341                         trace = PTRACE_EVENT_FORK;
2342
2343                 if (likely(!ptrace_event_enabled(current, trace)))
2344                         trace = 0;
2345         }
2346
2347         p = copy_process(clone_flags, stack_start, stack_size, parent_tidptr,
2348                          child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
2349         add_latent_entropy();
2350
2351         if (IS_ERR(p))
2352                 return PTR_ERR(p);
2353
2354         /*
2355          * Do this prior to waking up the new thread - the thread pointer
2356          * might get invalid after that point, if the thread exits quickly.
2357          */
2358         trace_sched_process_fork(current, p);
2359
2360         pid = get_task_pid(p, PIDTYPE_PID);
2361         nr = pid_vnr(pid);
2362
2363         if (clone_flags & CLONE_PARENT_SETTID)
2364                 put_user(nr, parent_tidptr);
2365
2366         if (clone_flags & CLONE_VFORK) {
2367                 p->vfork_done = &vfork;
2368                 init_completion(&vfork);
2369                 get_task_struct(p);
2370         }
2371
2372         wake_up_new_task(p);
2373
2374         /* forking complete and child started to run, tell ptracer */
2375         if (unlikely(trace))
2376                 ptrace_event_pid(trace, pid);
2377
2378         if (clone_flags & CLONE_VFORK) {
2379                 if (!wait_for_vfork_done(p, &vfork))
2380                         ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2381         }
2382
2383         put_pid(pid);
2384         return nr;
2385 }
2386
2387 #ifndef CONFIG_HAVE_COPY_THREAD_TLS
2388 /* For compatibility with architectures that call do_fork directly rather than
2389  * using the syscall entry points below. */
2390 long do_fork(unsigned long clone_flags,
2391               unsigned long stack_start,
2392               unsigned long stack_size,
2393               int __user *parent_tidptr,
2394               int __user *child_tidptr)
2395 {
2396         return _do_fork(clone_flags, stack_start, stack_size,
2397                         parent_tidptr, child_tidptr, 0);
2398 }
2399 #endif
2400
2401 /*
2402  * Create a kernel thread.
2403  */
2404 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
2405 {
2406         return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
2407                 (unsigned long)arg, NULL, NULL, 0);
2408 }
2409
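/*
 * Editorial sketch: a caller could start a kernel thread directly, e.g.
 * (my_thread_fn and my_data are hypothetical):
 *
 *	pid_t pid = kernel_thread(my_thread_fn, my_data, CLONE_FS | CLONE_FILES);
 *
 * Most in-tree users should prefer the kthread_create()/kthread_run()
 * helpers, which build on this.
 */
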
2410 #ifdef __ARCH_WANT_SYS_FORK
2411 SYSCALL_DEFINE0(fork)
2412 {
2413 #ifdef CONFIG_MMU
2414         return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
2415 #else
2416         /* cannot be supported in nommu mode */
2417         return -EINVAL;
2418 #endif
2419 }
2420 #endif
2421
2422 #ifdef __ARCH_WANT_SYS_VFORK
2423 SYSCALL_DEFINE0(vfork)
2424 {
2425         return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
2426                         0, NULL, NULL, 0);
2427 }
2428 #endif
2429
2430 #ifdef __ARCH_WANT_SYS_CLONE
2431 #ifdef CONFIG_CLONE_BACKWARDS
2432 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2433                  int __user *, parent_tidptr,
2434                  unsigned long, tls,
2435                  int __user *, child_tidptr)
2436 #elif defined(CONFIG_CLONE_BACKWARDS2)
2437 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
2438                  int __user *, parent_tidptr,
2439                  int __user *, child_tidptr,
2440                  unsigned long, tls)
2441 #elif defined(CONFIG_CLONE_BACKWARDS3)
2442 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
2443                 int, stack_size,
2444                 int __user *, parent_tidptr,
2445                 int __user *, child_tidptr,
2446                 unsigned long, tls)
2447 #else
2448 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2449                  int __user *, parent_tidptr,
2450                  int __user *, child_tidptr,
2451                  unsigned long, tls)
2452 #endif
2453 {
2454         return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
2455 }
2456 #endif
2457
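/*
 * Editorial sketch (userspace, default argument order, no CLONE_BACKWARDS
 * variant): CLONE_PIDFD returns a pid file descriptor through parent_tidptr:
 *
 *	int pidfd = -1;
 *	pid_t pid = syscall(__NR_clone, CLONE_PIDFD | SIGCHLD, 0, &pidfd, NULL, 0);
 *	if (pid == 0)
 *		_exit(0);		/- child -/
 *	/- parent: pidfd now refers to the child (see pidfd_fops above) -/
 */
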
2458 void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
2459 {
2460         struct task_struct *leader, *parent, *child;
2461         int res;
2462
2463         read_lock(&tasklist_lock);
2464         leader = top = top->group_leader;
2465 down:
2466         for_each_thread(leader, parent) {
2467                 list_for_each_entry(child, &parent->children, sibling) {
2468                         res = visitor(child, data);
2469                         if (res) {
2470                                 if (res < 0)
2471                                         goto out;
2472                                 leader = child;
2473                                 goto down;
2474                         }
2475 up:
2476                         ;
2477                 }
2478         }
2479
2480         if (leader != top) {
2481                 child = leader;
2482                 parent = child->real_parent;
2483                 leader = parent->group_leader;
2484                 goto up;
2485         }
2486 out:
2487         read_unlock(&tasklist_lock);
2488 }
2489
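/*
 * Editorial sketch: the visitor's return value steers the walk; 0 moves on
 * to the next child, a positive value descends into that child's subtree,
 * and a negative value aborts the walk (my_visitor is hypothetical):
 *
 *	static int my_visitor(struct task_struct *t, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	walk_process_tree(tsk, my_visitor, NULL);
 */
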
2490 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
2491 #define ARCH_MIN_MMSTRUCT_ALIGN 0
2492 #endif
2493
2494 static void sighand_ctor(void *data)
2495 {
2496         struct sighand_struct *sighand = data;
2497
2498         spin_lock_init(&sighand->siglock);
2499         init_waitqueue_head(&sighand->signalfd_wqh);
2500 }
2501
2502 void __init proc_caches_init(void)
2503 {
2504         unsigned int mm_size;
2505
2506         sighand_cachep = kmem_cache_create("sighand_cache",
2507                         sizeof(struct sighand_struct), 0,
2508                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
2509                         SLAB_ACCOUNT, sighand_ctor);
2510         signal_cachep = kmem_cache_create("signal_cache",
2511                         sizeof(struct signal_struct), 0,
2512                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2513                         NULL);
2514         files_cachep = kmem_cache_create("files_cache",
2515                         sizeof(struct files_struct), 0,
2516                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2517                         NULL);
2518         fs_cachep = kmem_cache_create("fs_cache",
2519                         sizeof(struct fs_struct), 0,
2520                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2521                         NULL);
2522
2523         /*
2524          * The mm_cpumask is located at the end of mm_struct, and is
2525          * dynamically sized based on the maximum CPU number this system
2526          * can have, taking hotplug into account (nr_cpu_ids).
2527          */
2528         mm_size = sizeof(struct mm_struct) + cpumask_size();
2529
2530         mm_cachep = kmem_cache_create_usercopy("mm_struct",
2531                         mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
2532                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2533                         offsetof(struct mm_struct, saved_auxv),
2534                         sizeof_field(struct mm_struct, saved_auxv),
2535                         NULL);
2536         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
2537         mmap_init();
2538         nsproxy_cache_init();
2539 }
2540
2541 /*
2542  * Check constraints on flags passed to the unshare system call.
2543  */
2544 static int check_unshare_flags(unsigned long unshare_flags)
2545 {
2546         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
2547                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
2548                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
2549                                 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
2550                 return -EINVAL;
2551         /*
2552          * Not implemented, but pretend it works if there is nothing
2553          * to unshare.  Note that unsharing the address space or the
2554          * signal handlers also requires unsharing the signal queues (aka
2555          * CLONE_THREAD).
2556          */
2557         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
2558                 if (!thread_group_empty(current))
2559                         return -EINVAL;
2560         }
2561         if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
2562                 if (refcount_read(&current->sighand->count) > 1)
2563                         return -EINVAL;
2564         }
2565         if (unshare_flags & CLONE_VM) {
2566                 if (!current_is_single_threaded())
2567                         return -EINVAL;
2568         }
2569
2570         return 0;
2571 }
2572
2573 /*
2574  * Unshare the filesystem structure if it is being shared
2575  */
2576 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
2577 {
2578         struct fs_struct *fs = current->fs;
2579
2580         if (!(unshare_flags & CLONE_FS) || !fs)
2581                 return 0;
2582
2583         /* don't need lock here; in the worst case we'll do useless copy */
2584         if (fs->users == 1)
2585                 return 0;
2586
2587         *new_fsp = copy_fs_struct(fs);
2588         if (!*new_fsp)
2589                 return -ENOMEM;
2590
2591         return 0;
2592 }
2593
2594 /*
2595  * Unshare file descriptor table if it is being shared
2596  */
2597 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
2598 {
2599         struct files_struct *fd = current->files;
2600         int error = 0;
2601
2602         if ((unshare_flags & CLONE_FILES) &&
2603             (fd && atomic_read(&fd->count) > 1)) {
2604                 *new_fdp = dup_fd(fd, &error);
2605                 if (!*new_fdp)
2606                         return error;
2607         }
2608
2609         return 0;
2610 }
2611
2612 /*
2613  * unshare allows a process to 'unshare' part of the process
2614  * context which was originally shared using clone.  copy_*
2615  * functions used by do_fork() cannot be used here directly
2616  * because they modify an inactive task_struct that is being
2617  * constructed. Here we are modifying the current, active,
2618  * task_struct.
2619  */
2620 int ksys_unshare(unsigned long unshare_flags)
2621 {
2622         struct fs_struct *fs, *new_fs = NULL;
2623         struct files_struct *fd, *new_fd = NULL;
2624         struct cred *new_cred = NULL;
2625         struct nsproxy *new_nsproxy = NULL;
2626         int do_sysvsem = 0;
2627         int err;
2628
2629         /*
2630          * If unsharing a user namespace, we must also unshare the thread group
2631          * and unshare the filesystem root and working directories.
2632          */
2633         if (unshare_flags & CLONE_NEWUSER)
2634                 unshare_flags |= CLONE_THREAD | CLONE_FS;
2635         /*
2636          * If unsharing the VM, we must also unshare signal handlers.
2637          */
2638         if (unshare_flags & CLONE_VM)
2639                 unshare_flags |= CLONE_SIGHAND;
2640         /*
2641          * If unsharing signal handlers, we must also unshare the signal queues.
2642          */
2643         if (unshare_flags & CLONE_SIGHAND)
2644                 unshare_flags |= CLONE_THREAD;
2645         /*
2646          * If unsharing a namespace, we must also unshare filesystem information.
2647          */
2648         if (unshare_flags & CLONE_NEWNS)
2649                 unshare_flags |= CLONE_FS;
2650
2651         err = check_unshare_flags(unshare_flags);
2652         if (err)
2653                 goto bad_unshare_out;
2654         /*
2655          * CLONE_NEWIPC must also detach from the undolist: after switching
2656          * to a new ipc namespace, the semaphore arrays from the old
2657          * namespace are unreachable.
2658          */
2659         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
2660                 do_sysvsem = 1;
2661         err = unshare_fs(unshare_flags, &new_fs);
2662         if (err)
2663                 goto bad_unshare_out;
2664         err = unshare_fd(unshare_flags, &new_fd);
2665         if (err)
2666                 goto bad_unshare_cleanup_fs;
2667         err = unshare_userns(unshare_flags, &new_cred);
2668         if (err)
2669                 goto bad_unshare_cleanup_fd;
2670         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
2671                                          new_cred, new_fs);
2672         if (err)
2673                 goto bad_unshare_cleanup_cred;
2674
2675         if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
2676                 if (do_sysvsem) {
2677                         /*
2678                          * CLONE_SYSVSEM is equivalent to sys_exit().
2679                          */
2680                         exit_sem(current);
2681                 }
2682                 if (unshare_flags & CLONE_NEWIPC) {
2683                         /* Orphan segments in old ns (see sem above). */
2684                         exit_shm(current);
2685                         shm_init_task(current);
2686                 }
2687
2688                 if (new_nsproxy)
2689                         switch_task_namespaces(current, new_nsproxy);
2690
2691                 task_lock(current);
2692
2693                 if (new_fs) {
2694                         fs = current->fs;
2695                         spin_lock(&fs->lock);
2696                         current->fs = new_fs;
2697                         if (--fs->users)
2698                                 new_fs = NULL;
2699                         else
2700                                 new_fs = fs;
2701                         spin_unlock(&fs->lock);
2702                 }
2703
2704                 if (new_fd) {
2705                         fd = current->files;
2706                         current->files = new_fd;
2707                         new_fd = fd;
2708                 }
2709
2710                 task_unlock(current);
2711
2712                 if (new_cred) {
2713                         /* Install the new user namespace */
2714                         commit_creds(new_cred);
2715                         new_cred = NULL;
2716                 }
2717         }
2718
2719         perf_event_namespaces(current);
2720
2721 bad_unshare_cleanup_cred:
2722         if (new_cred)
2723                 put_cred(new_cred);
2724 bad_unshare_cleanup_fd:
2725         if (new_fd)
2726                 put_files_struct(new_fd);
2727
2728 bad_unshare_cleanup_fs:
2729         if (new_fs)
2730                 free_fs_struct(new_fs);
2731
2732 bad_unshare_out:
2733         return err;
2734 }
2735
2736 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
2737 {
2738         return ksys_unshare(unshare_flags);
2739 }
2740
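/*
 * Editorial sketch (userspace): a task can move itself into private
 * namespaces without forking, e.g. a new mount + UTS namespace:
 *
 *	if (unshare(CLONE_NEWNS | CLONE_NEWUTS) == 0)
 *		sethostname("sandbox", 7);	/- visible only in the new ns -/
 *
 * This needs CAP_SYS_ADMIN in the current user namespace unless
 * CLONE_NEWUSER is requested as well.
 */
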
2741 /*
2742  *      Helper to unshare the files of the current task.
2743  *      We don't want to expose copy_files internals to
2744  *      the exec layer of the kernel.
2745  */
2746
2747 int unshare_files(struct files_struct **displaced)
2748 {
2749         struct task_struct *task = current;
2750         struct files_struct *copy = NULL;
2751         int error;
2752
2753         error = unshare_fd(CLONE_FILES, &copy);
2754         if (error || !copy) {
2755                 *displaced = NULL;
2756                 return error;
2757         }
2758         *displaced = task->files;
2759         task_lock(task);
2760         task->files = copy;
2761         task_unlock(task);
2762         return 0;
2763 }
2764
2765 int sysctl_max_threads(struct ctl_table *table, int write,
2766                        void __user *buffer, size_t *lenp, loff_t *ppos)
2767 {
2768         struct ctl_table t;
2769         int ret;
2770         int threads = max_threads;
2771         int min = MIN_THREADS;
2772         int max = MAX_THREADS;
2773
2774         t = *table;
2775         t.data = &threads;
2776         t.extra1 = &min;
2777         t.extra2 = &max;
2778
2779         ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2780         if (ret || !write)
2781                 return ret;
2782
2783         set_max_threads(threads);
2784
2785         return 0;
2786 }
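
/*
 * Editorial note: this handler backs /proc/sys/kernel/threads-max.
 * Out-of-range writes are rejected by proc_dointvec_minmax() (bounded by
 * MIN_THREADS/MAX_THREADS as set up above); accepted values are applied
 * through set_max_threads(). Shell sketch:
 *
 *	# cat /proc/sys/kernel/threads-max
 *	# echo 100000 > /proc/sys/kernel/threads-max
 */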