// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

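/*
 * Detach @p from the pid hashes and exit bookkeeping lists. Called with
 * tasklist_lock write-locked and @p->sighand->siglock held; if @p is the
 * thread-group leader, the group-wide ids (TGID, PGID, SID) are detached
 * as well.
 */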
static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_TGID);
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
        list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
        u64 utime, stime;

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
        posix_cpu_timers_exit(tsk);
        if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
        } else {
                /*
                 * This can only happen if the caller is de_thread().
                 * FIXME: this is a temporary hack, we should teach
                 * posix-cpu-timers to handle this case correctly.
                 */
                if (unlikely(has_group_leader_pid(tsk)))
                        posix_cpu_timers_exit_group(tsk);
        }
#endif

        if (group_dead) {
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
        }

        add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                              sizeof(unsigned long long));

        /*
         * Accumulate here the counters for all threads as they die. We could
         * skip the group leader because it is the last user of signal_struct,
         * but we want to avoid the race with thread_group_cputime() which can
         * see the empty ->thread_head list.
         */
        task_cputime(tsk, &utime, &stime);
        write_seqlock(&sig->stats_lock);
        sig->utime += utime;
        sig->stime += stime;
        sig->gtime += task_gtime(tsk);
        sig->min_flt += tsk->min_flt;
        sig->maj_flt += tsk->maj_flt;
        sig->nvcsw += tsk->nvcsw;
        sig->nivcsw += tsk->nivcsw;
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
        sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        sig->nr_threads--;
        __unhash_process(tsk, group_dead);
        write_sequnlock(&sig->stats_lock);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}

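/*
 * RCU callback for the final put of a task_struct, run a grace period
 * after the last rcu_users reference is dropped so that lockless
 * rcu_read_lock() walkers never see a freed task.
 */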
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}

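/* Drop an rcu_users reference; the last one frees the task after RCU. */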
void put_task_struct_rcu_user(struct task_struct *task)
{
        if (refcount_dec_and_test(&task->rcu_users))
                call_rcu(&task->rcu, delayed_put_task_struct);
}

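/*
 * Detach a dead task from the rest of the system: drop its user's
 * process count, its proc entries, cgroup and ptrace links, unhash it,
 * and reap a zombie group leader if this was its last live thread.
 */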
void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
        rcu_read_unlock();

        proc_flush_task(p);
        cgroup_release(p);

        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader)
                        && leader->exit_state == EXIT_ZOMBIE) {
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 */
                zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        put_task_struct_rcu_user(p);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

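/* Wake the task, if any, currently parked in rcuwait_wait_event() on @w. */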
void rcuwait_wake_up(struct rcuwait *w)
{
        struct task_struct *task;

        rcu_read_lock();

        /*
         * Order condition vs @task, such that everything prior to the load
         * of @task is visible. This is the condition as to why the user called
         * rcuwait_wake_up() in the first place. Pairs with set_current_state()
         * barrier (A) in rcuwait_wait_event().
         *
         *    WAIT                WAKE
         *    [S] tsk = current   [S] cond = true
         *        MB (A)              MB (B)
         *    [L] cond            [L] tsk
         */
        smp_mb(); /* (B) */

        task = rcu_dereference(w->task);
        if (task)
                wake_up_process(task);
        rcu_read_unlock();
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
                                        struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

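/* Does any task in the process group sit in SIGNAL_STOP_STOPPED state? */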
static bool has_stopped_jobs(struct pid *pgrp)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting. If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        /*
         * If the exiting or execing task is not the owner, it's
         * someone else's problem.
         */
        if (mm->owner != p)
                return;
        /*
         * The current owner is exiting/execing and there are no other
         * candidates.  Do not leave the mm pointing to a possibly
         * freed task structure.
         */
        if (atomic_read(&mm->mm_users) <= 1) {
                WRITE_ONCE(mm->owner, NULL);
                return;
        }

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else, we should not get here often.
         */
        for_each_process(g) {
                if (g->flags & PF_KTHREAD)
                        continue;
                for_each_thread(g, c) {
                        if (c->mm == mm)
                                goto assign_new_owner;
                        if (c->mm)
                                break;
                }
        }
        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        WRITE_ONCE(mm->owner, NULL);
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        WRITE_ONCE(mm->owner, c);
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
        struct mm_struct *mm = current->mm;
        struct core_state *core_state;

        mm_release(current, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;

                up_read(&mm->mmap_sem);

                self.task = current;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        freezable_schedule();
                }
                __set_current_state(TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
        current->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        task_unlock(current);
        mm_update_next_owner(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
                exit_oom_victim();
}

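/* Return the first thread in @p's group that has not set PF_EXITING yet. */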
static struct task_struct *find_alive_thread(struct task_struct *p)
{
        struct task_struct *t;

        for_each_thread(p, t) {
                if (!(t->flags & PF_EXITING))
                        return t;
        }
        return NULL;
}

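/*
 * Find who reaps @father's orphans: normally the pid namespace's
 * child_reaper. If @father *is* the reaper, hand the job to one of his
 * live threads, or failing that tear the whole pid namespace down.
 */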
static struct task_struct *find_child_reaper(struct task_struct *father,
                                                struct list_head *dead)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *reaper = pid_ns->child_reaper;
        struct task_struct *p, *n;

        if (likely(reaper != father))
                return reaper;

        reaper = find_alive_thread(father);
        if (reaper) {
                pid_ns->child_reaper = reaper;
                return reaper;
        }

        write_unlock_irq(&tasklist_lock);
        if (unlikely(pid_ns == &init_pid_ns)) {
                panic("Attempted to kill init! exitcode=0x%08x\n",
                        father->signal->group_exit_code ?: father->exit_code);
        }

        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        zap_pid_ns_processes(pid_ns);
        write_lock_irq(&tasklist_lock);

        return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give them to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give them to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
                                           struct task_struct *child_reaper)
{
        struct task_struct *thread, *reaper;

        thread = find_alive_thread(father);
        if (thread)
                return thread;

        if (father->signal->has_child_subreaper) {
                unsigned int ns_level = task_pid(father)->level;
                /*
                 * Find the first ->is_child_subreaper ancestor in our pid_ns.
                 * We can't check reaper != child_reaper to ensure we do not
                 * cross the namespaces, the exiting parent could be injected
                 * by setns() + fork().
                 * We check pid->level, this is slightly more efficient than
                 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
                 */
                for (reaper = father->real_parent;
                     task_pid(reaper)->level == ns_level;
                     reaper = reaper->real_parent) {
                        if (reaper == &init_task)
                                break;
                        if (!reaper->signal->is_child_subreaper)
                                continue;
                        thread = find_alive_thread(reaper);
                        if (thread)
                                return thread;
                }
        }

        return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                                struct list_head *dead)
{
        if (unlikely(p->exit_state == EXIT_DEAD))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited notify the new parent about this child's death. */
        if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_add(&p->ptrace_entry, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *      as a result of our exiting, and if they have any stopped
 *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
                                        struct list_head *dead)
{
        struct task_struct *p, *t, *reaper;

        if (unlikely(!list_empty(&father->ptraced)))
                exit_ptrace(father, dead);

        /* Can drop and reacquire tasklist_lock */
        reaper = find_child_reaper(father, dead);
        if (list_empty(&father->children))
                return;

        reaper = find_new_reaper(father, reaper);
        list_for_each_entry(p, &father->children, sibling) {
                for_each_thread(p, t) {
                        t->real_parent = reaper;
                        BUG_ON((!t->ptrace) != (t->parent == father));
                        if (likely(!t->ptrace))
                                t->parent = t->real_parent;
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t,
                                                    PIDTYPE_TGID);
                }
                /*
                 * If this is a threaded reparent there is no need to
                 * notify anyone anything has happened.
                 */
                if (!same_thread_group(reaper, father))
                        reparent_leader(father, p, dead);
        }
        list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;
        struct task_struct *p, *n;
        LIST_HEAD(dead);

        write_lock_irq(&tasklist_lock);
        forget_original_parent(tsk, &dead);

        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        tsk->exit_state = EXIT_ZOMBIE;
        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                                thread_group_empty(tsk) &&
                                !ptrace_reparented(tsk) ?
                        tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
                        do_notify_parent(tsk, tsk->exit_signal);
        } else {
                autoreap = true;
        }

        if (autoreap) {
                tsk->exit_state = EXIT_DEAD;
                list_add(&tsk->ptrace_entry, &dead);
        }

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);

        list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
}

#ifdef CONFIG_DEBUG_STACK_USAGE
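/* Log a new record-low amount of free stack space, checked at exit. */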
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
                        current->comm, task_pid_nr(current), free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

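/*
 * The final act of every task: release all resources, notify whoever
 * needs to mourn us, and schedule away for the last time. Never returns.
 */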
void __noreturn do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);
        kcov_task_exit(tsk);

        WARN_ON(blk_needs_flush_plug(tsk));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        /*
         * If do_exit is called because this process oopsed, it's possible
         * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
         * continuing. Amongst other possible reasons, this is to prevent
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
        set_fs(USER_DS);

        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                pr_alert("Fixing recursive fault but reboot is needed!\n");
                futex_exit_done(tsk);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * Ensure that all new tsk->pi_lock acquisitions must observe
         * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
         */
        smp_mb();
        /*
         * Ensure that we must observe the pi_state in exit_mm() ->
         * mm_release() -> exit_pi_state_list().
         */
        raw_spin_lock_irq(&tsk->pi_lock);
        raw_spin_unlock_irq(&tsk->pi_lock);

        if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
                        current->comm, task_pid_nr(current),
                        preempt_count());
                preempt_count_set(PREEMPT_ENABLED);
        }

        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
        acct_update_integrals(tsk);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
#ifdef CONFIG_POSIX_TIMERS
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
#endif
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm();

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        if (group_dead)
                disassociate_ctty(1);
        exit_task_namespaces(tsk);
        exit_task_work(tsk);
        exit_thread(tsk);
        exit_umh(tsk);

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * because of cgroup mode, must be called before cgroup_exit()
         */
        perf_event_exit_task(tsk);

        sched_autogroup_exit_task(tsk);
        cgroup_exit(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        flush_ptrace_hw_breakpoint(tsk);

        exit_tasks_rcu_start();
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
        mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held();
        futex_exit_done(tsk);

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                free_pipe_info(tsk->splice_pipe);

        if (tsk->task_frag.page)
                put_page(tsk->task_frag.page);

        validate_creds_for_do_exit(tsk);

        check_stack_usage();
        preempt_disable();
        if (tsk->nr_dirtied)
                __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();
        exit_tasks_rcu_finish();

        lockdep_free_task(tsk);
        do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;

                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}

struct waitid_info {
        pid_t pid;
        uid_t uid;
        int status;
        int cause;
};

struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct waitid_info      *wo_info;
        int                     wo_stat;
        struct rusage           *wo_rusage;

        wait_queue_entry_t              child_wait;
        int                     notask_error;
};

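/*
 * Does @p match the wait target? wo_type == PIDTYPE_MAX acts as a
 * wildcard (wait for anything), otherwise compare the requested pid.
 */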
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return  wo->wo_type == PIDTYPE_MAX ||
                task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;

        /*
         * Wait for all children (clone and not) if __WALL is set or
         * if it is traced by us.
         */
        if (ptrace || (wo->wo_flags & __WALL))
                return 1;

        /*
         * Otherwise, wait for clone children *only* if __WCLONE is set;
         * otherwise, wait for non-clone children *only*.
         *
         * Note: a "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD, or a non-leader thread which
         * we can only see if it is traced by us.
         */
        if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
                return 0;

        return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        int state, status;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
        struct waitid_info *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                status = p->exit_code;
                get_task_struct(p);
                read_unlock(&tasklist_lock);
                sched_annotate_sleep();
                if (wo->wo_rusage)
                        getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
                put_task_struct(p);
                goto out_info;
        }
        /*
         * Move the task's state to DEAD/TRACE, only one thread can do this.
         */
        state = (ptrace_reparented(p) && thread_group_leader(p)) ?
                EXIT_TRACE : EXIT_DEAD;
        if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
                return 0;
        /*
         * We own this thread, nobody else can reap it.
         */
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();

        /*
         * Check thread_group_leader() to exclude the traced sub-threads.
         */
        if (state == EXIT_DEAD && thread_group_leader(p)) {
                struct signal_struct *sig = p->signal;
                struct signal_struct *psig = current->signal;
                unsigned long maxrss;
                u64 tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields because the whole thread group is dead
                 * and nobody can change them.
                 *
                 * psig->stats_lock also protects us from our sub-threads
                 * which can reap other children at the same time. Until
                 * we change k_getrusage()-like users to rely on this lock
                 * we have to take ->siglock as well.
                 *
                 * We use thread_group_cputime_adjusted() to get times for
                 * the thread group, which consolidates times for all threads
                 * in the group including the group leader.
                 */
                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                spin_lock_irq(&current->sighand->siglock);
                write_seqlock(&psig->stats_lock);
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
                psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                write_sequnlock(&psig->stats_lock);
                spin_unlock_irq(&current->sighand->siglock);
        }

        if (wo->wo_rusage)
                getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        wo->wo_stat = status;

        if (state == EXIT_TRACE) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);

                /* If parent wants a zombie, don't release it now */
                state = EXIT_ZOMBIE;
                if (do_notify_parent(p, p->exit_signal))
                        state = EXIT_DEAD;
                p->exit_state = state;
                write_unlock_irq(&tasklist_lock);
        }
        if (state == EXIT_DEAD)
                release_task(p);

out_info:
        infop = wo->wo_info;
        if (infop) {
                if ((status & 0x7f) == 0) {
                        infop->cause = CLD_EXITED;
                        infop->status = status >> 8;
                } else {
                        infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        infop->status = status & 0x7f;
                }
                infop->pid = pid;
                infop->uid = uid;
        }

        return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                                int ptrace, struct task_struct *p)
{
        struct waitid_info *infop;
        int exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();
        if (wo->wo_rusage)
                getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
        put_task_struct(p);

        if (likely(!(wo->wo_flags & WNOWAIT)))
                wo->wo_stat = (exit_code << 8) | 0x7f;

        infop = wo->wo_info;
        if (infop) {
                infop->cause = why;
                infop->status = exit_code;
                infop->pid = pid;
                infop->uid = uid;
        }
        return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        struct waitid_info *infop;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = from_kuid_munged(current_user_ns(), task_uid(p));
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();
        if (wo->wo_rusage)
                getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
        put_task_struct(p);

        infop = wo->wo_info;
        if (!infop) {
                wo->wo_stat = 0xffff;
        } else {
                infop->cause = CLD_CONTINUED;
                infop->pid = pid;
                infop->uid = uid;
                infop->status = SIGCONT;
        }
        return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
{
        /*
         * We can race with wait_task_zombie() from another thread.
         * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
         * can't confuse the checks below.
         */
        int exit_state = READ_ONCE(p->exit_state);
        int ret;

        if (unlikely(exit_state == EXIT_DEAD))
                return 0;

        ret = eligible_child(wo, ptrace, p);
        if (!ret)
                return ret;

        if (unlikely(exit_state == EXIT_TRACE)) {
                /*
                 * ptrace == 0 means we are the natural parent. In this case
                 * we should clear notask_error, debugger will notify us.
                 */
                if (likely(!ptrace))
                        wo->notask_error = 0;
                return 0;
        }

        if (likely(!ptrace) && unlikely(p->ptrace)) {
                /*
                 * If it is traced by its real parent's group, just pretend
                 * the caller is ptrace_do_wait() and reap this child if it
                 * is zombie.
                 *
                 * This also hides group stop state from real parent; otherwise
                 * a single stop can be reported twice as group and ptrace stop.
                 * If a ptracer wants to distinguish these two events for its
                 * own children it should create a separate process which takes
                 * the role of real parent.
                 */
                if (!ptrace_reparented(p))
                        ptrace = 1;
        }

        /* slay zombie? */
        if (exit_state == EXIT_ZOMBIE) {
                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p)) {
                        /*
                         * A zombie ptracee is only visible to its ptracer.
                         * Notification and reaping will be cascaded to the
                         * real parent when the ptracer detaches.
                         */
                        if (unlikely(ptrace) || likely(!p->ptrace))
                                return wait_task_zombie(wo, p);
                }

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
                 * so, if there are live subthreads, there are events to
                 * wait for.  If all subthreads are dead, it's still safe
                 * to clear - this function will be called again in a finite
                 * amount of time once all the subthreads are released and
                 * will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);

                if (ret)
                        return ret;
        }

        return 0;
}

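/* Like do_wait_thread(), but walk @tsk->ptraced instead of ->children. */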
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);

                if (ret)
                        return ret;
        }

        return 0;
}

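/*
 * Wakeup callback for the wait_chldexit queue: only wake a waiter whose
 * criteria the child passed in @key could actually satisfy.
 */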
static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
                                int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                                child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                                TASK_INTERRUPTIBLE, 1, p);
}

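/*
 * Core of the wait*() family: scan our children and ptrace targets for
 * a task matching @wo, sleeping on ->wait_chldexit until a candidate
 * appears, WNOHANG returns early, or a signal interrupts the wait.
 */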
static long do_wait(struct wait_opts *wo)
{
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(wo->wo_pid);

        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
        wo->child_wait.private = current;
        add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
        /*
         * If there is nothing that can match our criteria, just get out.
         * We will clear ->notask_error to zero if we see any child that
         * might later match our criteria, even if we are not able to reap
         * it yet.
         */
        wo->notask_error = -ECHILD;
        if ((wo->wo_type < PIDTYPE_MAX) &&
           (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
                goto notask;

        set_current_state(TASK_INTERRUPTIBLE);
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        goto end;

                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        goto end;

                if (wo->wo_flags & __WNOTHREAD)
                        break;
        } while_each_thread(current, tsk);
        read_unlock(&tasklist_lock);

notask:
        retval = wo->notask_error;
        if (!retval && !(wo->wo_flags & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }
end:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
}

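/* Resolve a pidfd to a referenced struct pid, or an ERR_PTR on failure. */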
static struct pid *pidfd_get_pid(unsigned int fd)
{
        struct fd f;
        struct pid *pid;

        f = fdget(fd);
        if (!f.file)
                return ERR_PTR(-EBADF);

        pid = pidfd_pid(f.file);
        if (!IS_ERR(pid))
                get_pid(pid);

        fdput(f);
        return pid;
}

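/* Common backend of the waitid() syscall and its compat variant. */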
static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
                          int options, struct rusage *ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;

                pid = find_get_pid(upid);
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid < 0)
                        return -EINVAL;

                if (upid)
                        pid = find_get_pid(upid);
                else
                        pid = get_task_pid(current, PIDTYPE_PGID);
                break;
        case P_PIDFD:
                type = PIDTYPE_PID;
                if (upid < 0)
                        return -EINVAL;

                pid = pidfd_get_pid(upid);
                if (IS_ERR(pid))
                        return PTR_ERR(pid);
                break;
        default:
                return -EINVAL;
        }

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options;
        wo.wo_info      = infop;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);

        put_pid(pid);
        return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct rusage r;
        struct waitid_info info = {.status = 0};
        long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
        int signo = 0;

        if (err > 0) {
                signo = SIGCHLD;
                err = 0;
                if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
                        return -EFAULT;
        }
        if (!infop)
                return err;

        if (!user_access_begin(infop, sizeof(*infop)))
                return -EFAULT;

        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(0, &infop->si_errno, Efault);
        unsafe_put_user(info.cause, &infop->si_code, Efault);
        unsafe_put_user(info.pid, &infop->si_pid, Efault);
        unsafe_put_user(info.uid, &infop->si_uid, Efault);
        unsafe_put_user(info.status, &infop->si_status, Efault);
        user_access_end();
        return err;
Efault:
        user_access_end();
        return -EFAULT;
}

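/* Common backend of wait4()/waitpid(): decode @upid into a pid target. */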
long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
                  struct rusage *ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        /* -INT_MIN is not defined */
        if (upid == INT_MIN)
                return -ESRCH;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options | WEXITED;
        wo.wo_info      = NULL;
        wo.wo_stat      = 0;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
        put_pid(pid);
        if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
                ret = -EFAULT;

        return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct rusage r;
        long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

        if (err > 0) {
                if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
                        return -EFAULT;
        }
        return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
        compat_pid_t, pid,
        compat_uint_t __user *, stat_addr,
        int, options,
        struct compat_rusage __user *, ru)
{
        struct rusage r;
        long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
        if (err > 0) {
                if (ru && put_compat_rusage(&r, ru))
                        return -EFAULT;
        }
        return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
                int, which, compat_pid_t, pid,
                struct compat_siginfo __user *, infop, int, options,
                struct compat_rusage __user *, uru)
{
        struct rusage ru;
        struct waitid_info info = {.status = 0};
        long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
        int signo = 0;
        if (err > 0) {
                signo = SIGCHLD;
                err = 0;
                if (uru) {
                        /* kernel_waitid() overwrites everything in ru */
                        if (COMPAT_USE_64BIT_TIME)
                                err = copy_to_user(uru, &ru, sizeof(ru));
                        else
                                err = put_compat_rusage(&ru, uru);
                        if (err)
                                return -EFAULT;
                }
        }

        if (!infop)
                return err;

        if (!user_access_begin(infop, sizeof(*infop)))
                return -EFAULT;

        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(0, &infop->si_errno, Efault);
        unsafe_put_user(info.cause, &infop->si_code, Efault);
        unsafe_put_user(info.pid, &infop->si_pid, Efault);
        unsafe_put_user(info.uid, &infop->si_uid, Efault);
        unsafe_put_user(info.status, &infop->si_status, Efault);
        user_access_end();
        return err;
Efault:
        user_access_end();
        return -EFAULT;
}
#endif

__weak void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);