/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	Changes to use preallocated sigqueue structures
 *	to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

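/*
 * Example (illustrative, not from the original file): with _NSIG_WORDS == 1,
 * a pending set of { SIGTERM, SIGUSR1 } and a blocked set of { SIGUSR1 }
 * reduce to
 *
 *	ready = (sigmask(SIGTERM) | sigmask(SIGUSR1)) & ~sigmask(SIGUSR1);
 *
 * which is non-zero: only the unblocked SIGTERM makes the task
 * signal-pending.
 */
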
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so themselves.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}

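/*
 * Illustrative usage (sketch, not from the original file): paths that change
 * ->blocked, such as the sigprocmask() implementation, update the mask under
 * ->siglock and then call recalc_sigpending() so TIF_SIGPENDING matches the
 * new mask:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */
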
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

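/*
 * Example (illustrative): if SIGUSR1 and SIGSEGV are both pending and
 * unblocked, the SYNCHRONOUS_MASK test above narrows the first word to the
 * synchronous faults, so next_signal() reports SIGSEGV before the
 * asynchronous SIGUSR1 even though both live in the same word.
 */
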
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

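/*
 * Note (illustrative): DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10)
 * above allows at most 10 of these messages per 5-second window; further
 * drops inside the window are not reported.
 */
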
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask bits in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;
	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}

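/*
 * Illustrative caller (sketch): copy_process() invokes
 * task_join_group_stop() for a new CLONE_THREAD sibling so that a thread
 * created in the middle of a group stop also stops, keeping
 * group_stop_count consistent.
 */
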
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}

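/*
 * Illustrative caller sketch (assumptions: kthread context; not part of the
 * original file).  As the comment above notes, callers must hold ->siglock:
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		pr_info("got signal %d\n", signr);
 */
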
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

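/*
 * Example (illustrative): if SIGTERM (< SIGRTMIN) is already pending for the
 * target, legacy_queue() is true and a second SIGTERM is coalesced away,
 * whereas real-time signals accumulate one sigqueue entry per send.
 */
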
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}

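/*
 * Example (illustrative): when the init task of a child pid namespace is
 * signalled from an ancestor namespace, task_pid_nr_ns() above yields 0, so
 * from_ancestor_ns is set and __send_signal() clears si_pid instead of
 * exposing a pid that has no meaning in the receiver's namespace.
 */
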
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, PIDTYPE_PID);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

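/*
 * Illustrative usage (sketch): callers go through the lock_task_sighand()
 * wrapper and must tolerate failure, since the task may already have exited:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		...	(->sighand pinned, ->siglock held)
 *		unlock_task_sighand(p, &flags);
 *	}
 */
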
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;

	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

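/*
 * Illustrative usage (sketch): drivers and kernel threads pass priv == 1
 * when sending on the kernel's own authority, e.g.
 *
 *	send_sig(SIGKILL, tsk, 1);
 *
 * which queues SI_KERNEL info, while priv == 0 stamps SI_USER info with
 * current's pid/uid.
 */
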
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

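/*
 * Illustrative usage (sketch, roughly mirroring mm/memory-failure.c): an
 * "action required" fault on consumed poison uses
 * force_sig_mceerr(BUS_MCEERR_AR, ...) against current, while an
 * asynchronous "action optional" notification uses
 * send_sig_mceerr(BUS_MCEERR_AO, ...); lsb carries log2 of the affected
 * mapping size, e.g. PAGE_SHIFT.
 */
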
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

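/*
 * Illustrative usage (sketch): a tty line discipline delivering ^C to the
 * foreground job would do roughly
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGINT, 1);
 *		put_pid(pgrp);
 *	}
 */
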
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

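/*
 * Illustrative flow (sketch, mirroring kernel/time/posix-timers.c):
 * timer_create() preallocates so that later expiry cannot fail with -EAGAIN:
 *
 *	new_timer->sigq = sigqueue_alloc();
 *	if (!new_timer->sigq)
 *		return -EAGAIN;
 *
 * and each expiry then goes through send_sigqueue() below.
 */
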
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

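/*
 * Illustrative usage (sketch): ptrace_event() reports events as
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 *
 * i.e. SIGTRAP in the low bits and the event in bits 8-15, which is exactly
 * the layout the BUG_ON() above checks.
 */
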
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

2241 * do_jobctl_trap - take care of ptrace jobctl traps
2243 * When PT_SEIZED, it's used for both group stop and explicit
2244 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2245 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2246 * the stop signal; otherwise, %SIGTRAP.
2248 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2249 * number as exit_code and no siginfo.
2252 * Must be called with @current->sighand->siglock held, which may be
2253 * released and re-acquired before returning with intervening sleep.
2255 static void do_jobctl_trap(void)
2257 struct signal_struct *signal = current->signal;
2258 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2260 if (current->ptrace & PT_SEIZED) {
2261 if (!signal->group_stop_count &&
2262 !(signal->flags & SIGNAL_STOP_STOPPED))
2264 WARN_ON_ONCE(!signr);
2265 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2268 WARN_ON_ONCE(!signr);
2269 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2270 current->exit_code = 0;
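/*
 * Illustrative user-space sketch (not part of this file): under
 * PTRACE_SEIZE, the trap handled above surfaces to the tracer as a
 * PTRACE_EVENT_STOP group-stop, per ptrace(2). "pid" is an assumed
 * tracee and handle_group_stop() an assumed helper; error handling is
 * omitted. Signal-delivery-stop is reported first, then the group-stop
 * once the signal is injected:
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, NULL);
 *	kill(pid, SIGSTOP);
 *	waitpid(pid, &status, 0);                  signal-delivery-stop
 *	ptrace(PTRACE_CONT, pid, NULL, SIGSTOP);   inject the signal
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    (status >> 8) == (SIGSTOP | (PTRACE_EVENT_STOP << 8)))
 *		handle_group_stop();
 */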
2274 static int ptrace_signal(int signr, siginfo_t *info)
2277 * We do not check sig_kernel_stop(signr) but set this marker
2278 * unconditionally because we do not know whether debugger will
2279 * change signr. This flag has no meaning unless we are going
2280 * to stop after return from ptrace_stop(). In this case it will
2281 * be checked in do_signal_stop(), we should only stop if it was
2282 * not cleared by SIGCONT while we were sleeping. See also the
2283 * comment in dequeue_signal().
2285 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2286 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2288 /* We're back. Did the debugger cancel the sig? */
2289 signr = current->exit_code;
2293 current->exit_code = 0;
2296 * Update the siginfo structure if the signal has
2297 * changed. If the debugger wanted something
2298 * specific in the siginfo structure then it should
2299 * have updated *info via PTRACE_SETSIGINFO.
2301 if (signr != info->si_signo) {
2302 clear_siginfo(info);
2303 info->si_signo = signr;
2305 info->si_code = SI_USER;
2307 info->si_pid = task_pid_vnr(current->parent);
2308 info->si_uid = from_kuid_munged(current_user_ns(),
2309 task_uid(current->parent));
2313 /* If the (new) signal is now blocked, requeue it. */
2314 if (sigismember(&current->blocked, signr)) {
2315 specific_send_sig_info(signr, info, current);
2322 int get_signal(struct ksignal *ksig)
2324 struct sighand_struct *sighand = current->sighand;
2325 struct signal_struct *signal = current->signal;
2328 if (unlikely(current->task_works))
2331 if (unlikely(uprobe_deny_signal()))
2335 * Do this once, we can't return to user-mode if freezing() == T.
2336 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2337 * thus do not need another check after return.
2342 spin_lock_irq(&sighand->siglock);
2344 * Every stopped thread goes here after wakeup. Check to see if
2345 * we should notify the parent, prepare_signal(SIGCONT) encodes
2346 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2348 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2351 if (signal->flags & SIGNAL_CLD_CONTINUED)
2352 why = CLD_CONTINUED;
2356 signal->flags &= ~SIGNAL_CLD_MASK;
2358 spin_unlock_irq(&sighand->siglock);
2361 * Notify the parent that we're continuing. This event is
2362 * always per-process and doesn't make a whole lot of sense
2363 * for ptracers, who shouldn't consume the state via
2364 * wait(2) either, but, for backward compatibility, notify
2365 * the ptracer of the group leader too unless it's gonna be a duplicate of some previous one.
2368 read_lock(&tasklist_lock);
2369 do_notify_parent_cldstop(current, false, why);
2371 if (ptrace_reparented(current->group_leader))
2372 do_notify_parent_cldstop(current->group_leader,
2374 read_unlock(&tasklist_lock);
2380 struct k_sigaction *ka;
2382 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2386 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2388 spin_unlock_irq(&sighand->siglock);
2392 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2395 break; /* will return 0 */
2397 if (unlikely(current->ptrace) && signr != SIGKILL) {
2398 signr = ptrace_signal(signr, &ksig->info);
2403 ka = &sighand->action[signr-1];
2405 /* Trace actually delivered signals. */
2406 trace_signal_deliver(signr, &ksig->info, ka);
2408 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2410 if (ka->sa.sa_handler != SIG_DFL) {
2411 /* Run the handler. */
2414 if (ka->sa.sa_flags & SA_ONESHOT)
2415 ka->sa.sa_handler = SIG_DFL;
2417 break; /* will return non-zero "signr" value */
2421 * Now we are doing the default action for this signal.
2423 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2427 * Global init gets no signals it doesn't want.
2428 * Container-init gets no signals it doesn't want from the same container.
2431 * Note that if global/container-init sees a sig_kernel_only()
2432 * signal here, the signal must have been generated internally
2433 * or must have come from an ancestor namespace. In either
2434 * case, the signal cannot be dropped.
2436 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2437 !sig_kernel_only(signr))
2440 if (sig_kernel_stop(signr)) {
2442 * The default action is to stop all threads in
2443 * the thread group. The job control signals
2444 * do nothing in an orphaned pgrp, but SIGSTOP
2445 * always works. Note that siglock needs to be
2446 * dropped during the call to is_orphaned_pgrp()
2447 * because of lock ordering with tasklist_lock.
2448 * This allows an intervening SIGCONT to be posted.
2449 * We need to check for that and bail out if necessary.
2451 if (signr != SIGSTOP) {
2452 spin_unlock_irq(&sighand->siglock);
2454 /* signals can be posted during this window */
2456 if (is_current_pgrp_orphaned())
2459 spin_lock_irq(&sighand->siglock);
2462 if (likely(do_signal_stop(ksig->info.si_signo))) {
2463 /* It released the siglock. */
2468 * We didn't actually stop, due to a race
2469 * with SIGCONT or something like that.
2474 spin_unlock_irq(&sighand->siglock);
2477 * Anything else is fatal, maybe with a core dump.
2479 current->flags |= PF_SIGNALED;
2481 if (sig_kernel_coredump(signr)) {
2482 if (print_fatal_signals)
2483 print_fatal_signal(ksig->info.si_signo);
2484 proc_coredump_connector(current);
2486 * If it was able to dump core, this kills all
2487 * other threads in the group and synchronizes with
2488 * their demise. If we lost the race with another
2489 * thread getting here, it set group_exit_code
2490 * first and our do_group_exit call below will use
2491 * that value and ignore the one we pass it.
2493 do_coredump(&ksig->info);
2497 * Death signals, no core dump.
2499 do_group_exit(ksig->info.si_signo);
2502 spin_unlock_irq(&sighand->siglock);
2505 return ksig->sig > 0;
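/*
 * Sketch of how architecture signal code typically drives get_signal();
 * the names do_signal() and handle_signal() follow common arch
 * conventions (e.g. x86), and syscall-restart handling is omitted:
 *
 *	void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			handle_signal(&ksig, regs);   set up the user frame
 *			return;
 *		}
 *		restore_saved_sigmask();
 *	}
 */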
2509 * signal_delivered - called after a signal was successfully delivered
2510 * @ksig: kernel signal struct
2511 * @stepping: nonzero if debugger single-step or block-step in use
2513 * This function should be called when a signal has successfully been
2514 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2515 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2516 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2518 static void signal_delivered(struct ksignal *ksig, int stepping)
2522 /* A signal was successfully delivered, and the
2523 saved sigmask was stored on the signal frame,
2524 and will be restored by sigreturn. So we can
2525 simply clear the restore sigmask flag. */
2526 clear_restore_sigmask();
2528 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2529 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2530 sigaddset(&blocked, ksig->sig);
2531 set_current_blocked(&blocked);
2532 tracehook_signal_handler(stepping);
2535 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2538 force_sigsegv(ksig->sig, current);
2540 signal_delivered(ksig, stepping);
2544 * It could be that complete_signal() picked us to notify about the
2545 * group-wide signal. Other threads should be notified now to take
2546 * the shared signals in @which since we will not.
2548 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2551 struct task_struct *t;
2553 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2554 if (sigisemptyset(&retarget))
2558 while_each_thread(tsk, t) {
2559 if (t->flags & PF_EXITING)
2562 if (!has_pending_signals(&retarget, &t->blocked))
2564 /* Remove the signals this thread can handle. */
2565 sigandsets(&retarget, &retarget, &t->blocked);
2567 if (!signal_pending(t))
2568 signal_wake_up(t, 0);
2570 if (sigisemptyset(&retarget))
2575 void exit_signals(struct task_struct *tsk)
2581 * @tsk is about to have PF_EXITING set - lock out users which
2582 * expect stable threadgroup.
2584 cgroup_threadgroup_change_begin(tsk);
2586 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2587 tsk->flags |= PF_EXITING;
2588 cgroup_threadgroup_change_end(tsk);
2592 spin_lock_irq(&tsk->sighand->siglock);
2594 * From now this task is not visible for group-wide signals,
2595 * see wants_signal(), do_signal_stop().
2597 tsk->flags |= PF_EXITING;
2599 cgroup_threadgroup_change_end(tsk);
2601 if (!signal_pending(tsk))
2604 unblocked = tsk->blocked;
2605 signotset(&unblocked);
2606 retarget_shared_pending(tsk, &unblocked);
2608 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2609 task_participate_group_stop(tsk))
2610 group_stop = CLD_STOPPED;
2612 spin_unlock_irq(&tsk->sighand->siglock);
2615 * If group stop has completed, deliver the notification. This
2616 * should always go to the real parent of the group leader.
2618 if (unlikely(group_stop)) {
2619 read_lock(&tasklist_lock);
2620 do_notify_parent_cldstop(tsk, false, group_stop);
2621 read_unlock(&tasklist_lock);
2625 EXPORT_SYMBOL(recalc_sigpending);
2626 EXPORT_SYMBOL_GPL(dequeue_signal);
2627 EXPORT_SYMBOL(flush_signals);
2628 EXPORT_SYMBOL(force_sig);
2629 EXPORT_SYMBOL(send_sig);
2630 EXPORT_SYMBOL(send_sig_info);
2631 EXPORT_SYMBOL(sigprocmask);
2634 * System call entry points.
2638 * sys_restart_syscall - restart a system call
2640 SYSCALL_DEFINE0(restart_syscall)
2642 struct restart_block *restart = &current->restart_block;
2643 return restart->fn(restart);
2646 long do_no_restart_syscall(struct restart_block *param)
2651 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2653 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2654 sigset_t newblocked;
2655 /* A set of now blocked but previously unblocked signals. */
2656 sigandnsets(&newblocked, newset, &current->blocked);
2657 retarget_shared_pending(tsk, &newblocked);
2659 tsk->blocked = *newset;
2660 recalc_sigpending();
2664 * set_current_blocked - change current->blocked mask
2667 * It is wrong to change ->blocked directly, this helper should be used
2668 * to ensure the process can't miss a shared signal we are going to block.
2670 void set_current_blocked(sigset_t *newset)
2672 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2673 __set_current_blocked(newset);
2676 void __set_current_blocked(const sigset_t *newset)
2678 struct task_struct *tsk = current;
2681 * In case the signal mask hasn't changed, there is nothing we need
2682 * to do. The current->blocked shouldn't be modified by other task.
2684 if (sigequalsets(&tsk->blocked, newset))
2687 spin_lock_irq(&tsk->sighand->siglock);
2688 __set_task_blocked(tsk, newset);
2689 spin_unlock_irq(&tsk->sighand->siglock);
2693 * This is also useful for kernel threads that want to temporarily
2694 * (or permanently) block certain signals.
2696 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2697 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2700 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2702 struct task_struct *tsk = current;
2705 /* Lockless, only current can change ->blocked, never from irq */
2707 *oldset = tsk->blocked;
2711 sigorsets(&newset, &tsk->blocked, set);
2714 sigandnsets(&newset, &tsk->blocked, set);
2723 __set_current_blocked(&newset);
2728 * sys_rt_sigprocmask - change the list of currently blocked signals
2729 * @how: whether to add, remove, or set signals
2730 * @nset: signals to add, remove, or set (if non-null)
2731 * @oset: previous value of signal mask if non-null
2732 * @sigsetsize: size of sigset_t type
2734 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2735 sigset_t __user *, oset, size_t, sigsetsize)
2737 sigset_t old_set, new_set;
2740 /* XXX: Don't preclude handling different sized sigset_t's. */
2741 if (sigsetsize != sizeof(sigset_t))
2744 old_set = current->blocked;
2747 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2749 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2751 error = sigprocmask(how, &new_set, NULL);
2757 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2764 #ifdef CONFIG_COMPAT
2765 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2766 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2768 sigset_t old_set = current->blocked;
2770 /* XXX: Don't preclude handling different sized sigset_t's. */
2771 if (sigsetsize != sizeof(sigset_t))
2777 if (get_compat_sigset(&new_set, nset))
2779 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2781 error = sigprocmask(how, &new_set, NULL);
2785 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
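/*
 * Example (user space): glibc's sigprocmask(3) is implemented on top of
 * rt_sigprocmask. A minimal sketch that blocks SIGINT around a critical
 * section and then restores the old mask; error handling omitted:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	...                                SIGINT stays pending here
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */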
2789 static int do_sigpending(sigset_t *set)
2791 spin_lock_irq(&current->sighand->siglock);
2792 sigorsets(set, &current->pending.signal,
2793 &current->signal->shared_pending.signal);
2794 spin_unlock_irq(&current->sighand->siglock);
2796 /* Outside the lock because only this thread touches it. */
2797 sigandsets(set, &current->blocked, set);
2802 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
2804 * @uset: stores pending signals
2805 * @sigsetsize: size of sigset_t type or larger
2807 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2812 if (sigsetsize > sizeof(*uset))
2815 err = do_sigpending(&set);
2816 if (!err && copy_to_user(uset, &set, sigsetsize))
2821 #ifdef CONFIG_COMPAT
2822 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2823 compat_size_t, sigsetsize)
2828 if (sigsetsize > sizeof(*uset))
2831 err = do_sigpending(&set);
2833 err = put_compat_sigset(uset, &set, sigsetsize);
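/*
 * Example (user space): sigpending(2) exposes the union computed by
 * do_sigpending() above. A minimal sketch; error handling omitted:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		...                a SIGINT was raised while blocked
 */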
2838 enum siginfo_layout siginfo_layout(int sig, int si_code)
2840 enum siginfo_layout layout = SIL_KILL;
2841 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2842 static const struct {
2843 unsigned char limit, layout;
2845 [SIGILL] = { NSIGILL, SIL_FAULT },
2846 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2847 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2848 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2849 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2850 #if defined(SIGEMT) && defined(NSIGEMT)
2851 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2853 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2854 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2855 [SIGSYS] = { NSIGSYS, SIL_SYS },
2857 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
2858 layout = filter[sig].layout;
2859 /* Handle the exceptions */
2860 if ((sig == SIGBUS) &&
2861 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2862 layout = SIL_FAULT_MCEERR;
2863 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2864 layout = SIL_FAULT_BNDERR;
2866 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2867 layout = SIL_FAULT_PKUERR;
2870 else if (si_code <= NSIGPOLL)
2873 if (si_code == SI_TIMER)
2875 else if (si_code == SI_SIGIO)
2877 else if (si_code < 0)
2883 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2885 if (copy_to_user(to, from, sizeof(struct siginfo)))
2890 #ifdef CONFIG_COMPAT
2891 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2892 const struct siginfo *from)
2893 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2895 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2897 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2898 const struct siginfo *from, bool x32_ABI)
2901 struct compat_siginfo new;
2902 memset(&new, 0, sizeof(new));
2904 new.si_signo = from->si_signo;
2905 new.si_errno = from->si_errno;
2906 new.si_code = from->si_code;
2907 switch(siginfo_layout(from->si_signo, from->si_code)) {
2909 new.si_pid = from->si_pid;
2910 new.si_uid = from->si_uid;
2913 new.si_tid = from->si_tid;
2914 new.si_overrun = from->si_overrun;
2915 new.si_int = from->si_int;
2918 new.si_band = from->si_band;
2919 new.si_fd = from->si_fd;
2922 new.si_addr = ptr_to_compat(from->si_addr);
2923 #ifdef __ARCH_SI_TRAPNO
2924 new.si_trapno = from->si_trapno;
2927 case SIL_FAULT_MCEERR:
2928 new.si_addr = ptr_to_compat(from->si_addr);
2929 #ifdef __ARCH_SI_TRAPNO
2930 new.si_trapno = from->si_trapno;
2932 new.si_addr_lsb = from->si_addr_lsb;
2934 case SIL_FAULT_BNDERR:
2935 new.si_addr = ptr_to_compat(from->si_addr);
2936 #ifdef __ARCH_SI_TRAPNO
2937 new.si_trapno = from->si_trapno;
2939 new.si_lower = ptr_to_compat(from->si_lower);
2940 new.si_upper = ptr_to_compat(from->si_upper);
2942 case SIL_FAULT_PKUERR:
2943 new.si_addr = ptr_to_compat(from->si_addr);
2944 #ifdef __ARCH_SI_TRAPNO
2945 new.si_trapno = from->si_trapno;
2947 new.si_pkey = from->si_pkey;
2950 new.si_pid = from->si_pid;
2951 new.si_uid = from->si_uid;
2952 new.si_status = from->si_status;
2953 #ifdef CONFIG_X86_X32_ABI
2955 new._sifields._sigchld_x32._utime = from->si_utime;
2956 new._sifields._sigchld_x32._stime = from->si_stime;
2960 new.si_utime = from->si_utime;
2961 new.si_stime = from->si_stime;
2965 new.si_pid = from->si_pid;
2966 new.si_uid = from->si_uid;
2967 new.si_int = from->si_int;
2970 new.si_call_addr = ptr_to_compat(from->si_call_addr);
2971 new.si_syscall = from->si_syscall;
2972 new.si_arch = from->si_arch;
2976 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
2982 int copy_siginfo_from_user32(struct siginfo *to,
2983 const struct compat_siginfo __user *ufrom)
2985 struct compat_siginfo from;
2987 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
2991 to->si_signo = from.si_signo;
2992 to->si_errno = from.si_errno;
2993 to->si_code = from.si_code;
2994 switch(siginfo_layout(from.si_signo, from.si_code)) {
2996 to->si_pid = from.si_pid;
2997 to->si_uid = from.si_uid;
3000 to->si_tid = from.si_tid;
3001 to->si_overrun = from.si_overrun;
3002 to->si_int = from.si_int;
3005 to->si_band = from.si_band;
3006 to->si_fd = from.si_fd;
3009 to->si_addr = compat_ptr(from.si_addr);
3010 #ifdef __ARCH_SI_TRAPNO
3011 to->si_trapno = from.si_trapno;
3014 case SIL_FAULT_MCEERR:
3015 to->si_addr = compat_ptr(from.si_addr);
3016 #ifdef __ARCH_SI_TRAPNO
3017 to->si_trapno = from.si_trapno;
3019 to->si_addr_lsb = from.si_addr_lsb;
3021 case SIL_FAULT_BNDERR:
3022 to->si_addr = compat_ptr(from.si_addr);
3023 #ifdef __ARCH_SI_TRAPNO
3024 to->si_trapno = from.si_trapno;
3026 to->si_lower = compat_ptr(from.si_lower);
3027 to->si_upper = compat_ptr(from.si_upper);
3029 case SIL_FAULT_PKUERR:
3030 to->si_addr = compat_ptr(from.si_addr);
3031 #ifdef __ARCH_SI_TRAPNO
3032 to->si_trapno = from.si_trapno;
3034 to->si_pkey = from.si_pkey;
3037 to->si_pid = from.si_pid;
3038 to->si_uid = from.si_uid;
3039 to->si_status = from.si_status;
3040 #ifdef CONFIG_X86_X32_ABI
3041 if (in_x32_syscall()) {
3042 to->si_utime = from._sifields._sigchld_x32._utime;
3043 to->si_stime = from._sifields._sigchld_x32._stime;
3047 to->si_utime = from.si_utime;
3048 to->si_stime = from.si_stime;
3052 to->si_pid = from.si_pid;
3053 to->si_uid = from.si_uid;
3054 to->si_int = from.si_int;
3057 to->si_call_addr = compat_ptr(from.si_call_addr);
3058 to->si_syscall = from.si_syscall;
3059 to->si_arch = from.si_arch;
3064 #endif /* CONFIG_COMPAT */
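/*
 * Example (user space): the layouts chosen by siginfo_layout() are what
 * an SA_SIGINFO handler observes. A minimal sketch of a SIGSEGV handler
 * reading the SIL_FAULT fields; fprintf() is not async-signal-safe and
 * is used here for illustration only:
 *
 *	static void segv_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		fprintf(stderr, "fault at %p, si_code %d\n",
 *			info->si_addr, info->si_code);
 *		_exit(1);
 *	}
 *
 *	struct sigaction sa = {
 *		.sa_sigaction = segv_handler,
 *		.sa_flags = SA_SIGINFO,
 *	};
 *	sigaction(SIGSEGV, &sa, NULL);
 */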
3067 * do_sigtimedwait - wait for queued signals specified in @which
3068 * @which: queued signals to wait for
3069 * @info: if non-null, the signal's siginfo is returned here
3070 * @ts: upper bound on process time suspension
3072 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3073 const struct timespec *ts)
3075 ktime_t *to = NULL, timeout = KTIME_MAX;
3076 struct task_struct *tsk = current;
3077 sigset_t mask = *which;
3081 if (!timespec_valid(ts))
3083 timeout = timespec_to_ktime(*ts);
3088 * Invert the set of allowed signals to get those we want to block.
3090 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3093 spin_lock_irq(&tsk->sighand->siglock);
3094 sig = dequeue_signal(tsk, &mask, info);
3095 if (!sig && timeout) {
3097 * None ready, temporarily unblock those we're interested in
3098 * while we are sleeping, so that we'll be awakened when
3099 * they arrive. Unblocking is always fine, we can avoid
3100 * set_current_blocked().
3102 tsk->real_blocked = tsk->blocked;
3103 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3104 recalc_sigpending();
3105 spin_unlock_irq(&tsk->sighand->siglock);
3107 __set_current_state(TASK_INTERRUPTIBLE);
3108 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3110 spin_lock_irq(&tsk->sighand->siglock);
3111 __set_task_blocked(tsk, &tsk->real_blocked);
3112 sigemptyset(&tsk->real_blocked);
3113 sig = dequeue_signal(tsk, &mask, info);
3115 spin_unlock_irq(&tsk->sighand->siglock);
3119 return ret ? -EINTR : -EAGAIN;
3123 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3125 * @uthese: queued signals to wait for
3126 * @uinfo: if non-null, the signal's siginfo is returned here
3127 * @uts: upper bound on process time suspension
3128 * @sigsetsize: size of sigset_t type
3130 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3131 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3139 /* XXX: Don't preclude handling different sized sigset_t's. */
3140 if (sigsetsize != sizeof(sigset_t))
3143 if (copy_from_user(&these, uthese, sizeof(these)))
3147 if (copy_from_user(&ts, uts, sizeof(ts)))
3151 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3153 if (ret > 0 && uinfo) {
3154 if (copy_siginfo_to_user(uinfo, &info))
3161 #ifdef CONFIG_COMPAT
3162 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3163 struct compat_siginfo __user *, uinfo,
3164 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3171 if (sigsetsize != sizeof(sigset_t))
3174 if (get_compat_sigset(&s, uthese))
3178 if (compat_get_timespec(&t, uts))
3182 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3184 if (ret > 0 && uinfo) {
3185 if (copy_siginfo_to_user32(uinfo, &info))
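/*
 * Example (user space): sigtimedwait(2) reaches do_sigtimedwait(). The
 * signals waited for must already be blocked, or they may be delivered
 * to a handler instead. A minimal sketch waiting up to two seconds for
 * SIGUSR1; error handling omitted:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		...                handle the signal synchronously
 */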
3194 * sys_kill - send a signal to a process
3195 * @pid: the PID of the process
3196 * @sig: signal to be sent
3198 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3200 struct siginfo info;
3202 clear_siginfo(&info);
3203 info.si_signo = sig;
3205 info.si_code = SI_USER;
3206 info.si_pid = task_tgid_vnr(current);
3207 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3209 return kill_something_info(sig, &info, pid);
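/*
 * Example (user space): signal 0 performs the existence/permission
 * probe described in do_send_specific() below without delivering
 * anything. A minimal sketch; error handling omitted:
 *
 *	if (kill(pid, 0) == 0)
 *		...                pid exists and we may signal it
 *	kill(pid, SIGTERM);
 */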
3213 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3215 struct task_struct *p;
3219 p = find_task_by_vpid(pid);
3220 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3221 error = check_kill_permission(sig, info, p);
3223 * The null signal is a permissions and process existence
3224 * probe. No signal is actually delivered.
3226 if (!error && sig) {
3227 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3229 * If lock_task_sighand() failed we pretend the task
3230 * dies after receiving the signal. The window is tiny,
3231 * and the signal is private anyway.
3233 if (unlikely(error == -ESRCH))
3242 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3244 struct siginfo info;
3246 clear_siginfo(&info);
3247 info.si_signo = sig;
3249 info.si_code = SI_TKILL;
3250 info.si_pid = task_tgid_vnr(current);
3251 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3253 return do_send_specific(tgid, pid, sig, &info);
3257 * sys_tgkill - send signal to one specific thread
3258 * @tgid: the thread group ID of the thread
3259 * @pid: the PID of the thread
3260 * @sig: signal to be sent
3262 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3263 * exists but no longer belongs to the target process. This
3264 * method solves the problem of threads exiting and PIDs getting reused.
3266 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3268 /* This is only valid for single tasks */
3269 if (pid <= 0 || tgid <= 0)
3272 return do_tkill(tgid, pid, sig);
3276 * sys_tkill - send signal to one specific task
3277 * @pid: the PID of the task
3278 * @sig: signal to be sent
3280 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3282 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3284 /* This is only valid for single tasks */
3288 return do_tkill(0, pid, sig);
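/*
 * Example (user space): tgkill(2) historically had no glibc wrapper and
 * is commonly invoked through syscall(2). A minimal sketch directing
 * SIGUSR1 at one specific thread of the current process:
 *
 *	#include <sys/syscall.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */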
3291 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3293 /* Not even root can pretend to send signals from the kernel.
3294 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3296 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3297 (task_pid_vnr(current) != pid))
3300 info->si_signo = sig;
3302 /* POSIX.1b doesn't mention process groups. */
3303 return kill_proc_info(sig, info, pid);
3307 * sys_rt_sigqueueinfo - send signal information to a process
3308 * @pid: the PID of the thread
3309 * @sig: signal to be sent
3310 * @uinfo: signal info to be sent
3312 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3313 siginfo_t __user *, uinfo)
3316 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3318 return do_rt_sigqueueinfo(pid, sig, &info);
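/*
 * Example (user space): sigqueue(3) is the usual entry into this path;
 * it sets si_code to SI_QUEUE (negative), which passes the spoofing
 * check above. A minimal sketch; error handling omitted:
 *
 *	union sigval value = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, value);
 */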
3321 #ifdef CONFIG_COMPAT
3322 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3325 struct compat_siginfo __user *, uinfo)
3328 int ret = copy_siginfo_from_user32(&info, uinfo);
3331 return do_rt_sigqueueinfo(pid, sig, &info);
3335 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3337 /* This is only valid for single tasks */
3338 if (pid <= 0 || tgid <= 0)
3341 /* Not even root can pretend to send signals from the kernel.
3342 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3344 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3345 (task_pid_vnr(current) != pid))
3348 info->si_signo = sig;
3350 return do_send_specific(tgid, pid, sig, info);
3353 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3354 siginfo_t __user *, uinfo)
3358 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3361 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3364 #ifdef CONFIG_COMPAT
3365 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3369 struct compat_siginfo __user *, uinfo)
3373 if (copy_siginfo_from_user32(&info, uinfo))
3375 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3380 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3382 void kernel_sigaction(int sig, __sighandler_t action)
3384 spin_lock_irq(&current->sighand->siglock);
3385 current->sighand->action[sig - 1].sa.sa_handler = action;
3386 if (action == SIG_IGN) {
3390 sigaddset(&mask, sig);
3392 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3393 flush_sigqueue_mask(&mask, &current->pending);
3394 recalc_sigpending();
3396 spin_unlock_irq(&current->sighand->siglock);
3398 EXPORT_SYMBOL(kernel_sigaction);
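/*
 * Example (in kernel): kthreads normally reach kernel_sigaction()
 * through the allow_signal()/disallow_signal() helpers declared in
 * <linux/signal.h>. A minimal sketch of a signal-aware kthread loop:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			break;
 *	}
 */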
3400 void __weak sigaction_compat_abi(struct k_sigaction *act,
3401 struct k_sigaction *oact)
3405 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3407 struct task_struct *p = current, *t;
3408 struct k_sigaction *k;
3411 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3414 k = &p->sighand->action[sig-1];
3416 spin_lock_irq(&p->sighand->siglock);
3420 sigaction_compat_abi(act, oact);
3423 sigdelsetmask(&act->sa.sa_mask,
3424 sigmask(SIGKILL) | sigmask(SIGSTOP));
3428 * "Setting a signal action to SIG_IGN for a signal that is
3429 * pending shall cause the pending signal to be discarded,
3430 * whether or not it is blocked."
3432 * "Setting a signal action to SIG_DFL for a signal that is
3433 * pending and whose default action is to ignore the signal
3434 * (for example, SIGCHLD), shall cause the pending signal to
3435 * be discarded, whether or not it is blocked"
3437 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3439 sigaddset(&mask, sig);
3440 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3441 for_each_thread(p, t)
3442 flush_sigqueue_mask(&mask, &t->pending);
3446 spin_unlock_irq(&p->sighand->siglock);
3451 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp)
3453 struct task_struct *t = current;
3456 memset(oss, 0, sizeof(stack_t));
3457 oss->ss_sp = (void __user *) t->sas_ss_sp;
3458 oss->ss_size = t->sas_ss_size;
3459 oss->ss_flags = sas_ss_flags(sp) |
3460 (current->sas_ss_flags & SS_FLAG_BITS);
3464 void __user *ss_sp = ss->ss_sp;
3465 size_t ss_size = ss->ss_size;
3466 unsigned ss_flags = ss->ss_flags;
3469 if (unlikely(on_sig_stack(sp)))
3472 ss_mode = ss_flags & ~SS_FLAG_BITS;
3473 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3477 if (ss_mode == SS_DISABLE) {
3481 if (unlikely(ss_size < MINSIGSTKSZ))
3485 t->sas_ss_sp = (unsigned long) ss_sp;
3486 t->sas_ss_size = ss_size;
3487 t->sas_ss_flags = ss_flags;
3492 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3496 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3498 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3499 current_user_stack_pointer());
3500 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
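/*
 * Example (user space): pairing sigaltstack(2) with SA_ONSTACK lets a
 * SIGSEGV handler run even when the normal stack has overflowed.
 * segv_handler is an assumed handler; error handling omitted:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */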
3505 int restore_altstack(const stack_t __user *uss)
3508 if (copy_from_user(&new, uss, sizeof(stack_t)))
3510 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
3511 /* squash all but -EFAULT for now */
3515 int __save_altstack(stack_t __user *uss, unsigned long sp)
3517 struct task_struct *t = current;
3518 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3519 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3520 __put_user(t->sas_ss_size, &uss->ss_size);
3523 if (t->sas_ss_flags & SS_AUTODISARM)
3528 #ifdef CONFIG_COMPAT
3529 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3530 compat_stack_t __user *uoss_ptr)
3536 compat_stack_t uss32;
3537 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3539 uss.ss_sp = compat_ptr(uss32.ss_sp);
3540 uss.ss_flags = uss32.ss_flags;
3541 uss.ss_size = uss32.ss_size;
3543 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3544 compat_user_stack_pointer());
3545 if (ret >= 0 && uoss_ptr) {
3547 memset(&old, 0, sizeof(old));
3548 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3549 old.ss_flags = uoss.ss_flags;
3550 old.ss_size = uoss.ss_size;
3551 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3557 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3558 const compat_stack_t __user *, uss_ptr,
3559 compat_stack_t __user *, uoss_ptr)
3561 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3564 int compat_restore_altstack(const compat_stack_t __user *uss)
3566 int err = do_compat_sigaltstack(uss, NULL);
3567 /* squash all but -EFAULT for now */
3568 return err == -EFAULT ? err : 0;
3571 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3574 struct task_struct *t = current;
3575 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3577 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3578 __put_user(t->sas_ss_size, &uss->ss_size);
3581 if (t->sas_ss_flags & SS_AUTODISARM)
3587 #ifdef __ARCH_WANT_SYS_SIGPENDING
3590 * sys_sigpending - examine pending signals
3591 * @uset: where the mask of pending signals is returned
3593 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3598 if (sizeof(old_sigset_t) > sizeof(*uset))
3601 err = do_sigpending(&set);
3602 if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
3607 #ifdef CONFIG_COMPAT
3608 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3611 int err = do_sigpending(&set);
3613 err = put_user(set.sig[0], set32);
3620 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3622 * sys_sigprocmask - examine and change blocked signals
3623 * @how: whether to add, remove, or set signals
3624 * @nset: signals to add or remove (if non-null)
3625 * @oset: previous value of signal mask if non-null
3627 * Some platforms have their own version with special arguments;
3628 * others support only sys_rt_sigprocmask.
3631 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3632 old_sigset_t __user *, oset)
3634 old_sigset_t old_set, new_set;
3635 sigset_t new_blocked;
3637 old_set = current->blocked.sig[0];
3640 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3643 new_blocked = current->blocked;
3647 sigaddsetmask(&new_blocked, new_set);
3650 sigdelsetmask(&new_blocked, new_set);
3653 new_blocked.sig[0] = new_set;
3659 set_current_blocked(&new_blocked);
3663 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3669 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3671 #ifndef CONFIG_ODD_RT_SIGACTION
3673 * sys_rt_sigaction - alter an action taken by a process
3674 * @sig: signal to be sent
3675 * @act: new sigaction
3676 * @oact: used to save the previous sigaction
3677 * @sigsetsize: size of sigset_t type
3679 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3680 const struct sigaction __user *, act,
3681 struct sigaction __user *, oact,
3684 struct k_sigaction new_sa, old_sa;
3687 /* XXX: Don't preclude handling different sized sigset_t's. */
3688 if (sigsetsize != sizeof(sigset_t))
3692 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3696 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3699 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
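/*
 * Example (user space): glibc's sigaction(3) is built on rt_sigaction.
 * A minimal sketch installing a SIGINT handler that also blocks SIGTERM
 * while the handler runs; on_int is an assumed handler:
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_int;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGTERM);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGINT, &sa, NULL);
 */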
3705 #ifdef CONFIG_COMPAT
3706 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3707 const struct compat_sigaction __user *, act,
3708 struct compat_sigaction __user *, oact,
3709 compat_size_t, sigsetsize)
3711 struct k_sigaction new_ka, old_ka;
3712 #ifdef __ARCH_HAS_SA_RESTORER
3713 compat_uptr_t restorer;
3717 /* XXX: Don't preclude handling different sized sigset_t's. */
3718 if (sigsetsize != sizeof(compat_sigset_t))
3722 compat_uptr_t handler;
3723 ret = get_user(handler, &act->sa_handler);
3724 new_ka.sa.sa_handler = compat_ptr(handler);
3725 #ifdef __ARCH_HAS_SA_RESTORER
3726 ret |= get_user(restorer, &act->sa_restorer);
3727 new_ka.sa.sa_restorer = compat_ptr(restorer);
3729 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3730 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3735 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3737 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3739 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3740 sizeof(oact->sa_mask));
3741 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3742 #ifdef __ARCH_HAS_SA_RESTORER
3743 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3744 &oact->sa_restorer);
3750 #endif /* !CONFIG_ODD_RT_SIGACTION */
3752 #ifdef CONFIG_OLD_SIGACTION
3753 SYSCALL_DEFINE3(sigaction, int, sig,
3754 const struct old_sigaction __user *, act,
3755 struct old_sigaction __user *, oact)
3757 struct k_sigaction new_ka, old_ka;
3762 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3763 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3764 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3765 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3766 __get_user(mask, &act->sa_mask))
3768 #ifdef __ARCH_HAS_KA_RESTORER
3769 new_ka.ka_restorer = NULL;
3771 siginitset(&new_ka.sa.sa_mask, mask);
3774 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3777 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3778 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3779 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3780 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3781 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3788 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3789 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3790 const struct compat_old_sigaction __user *, act,
3791 struct compat_old_sigaction __user *, oact)
3793 struct k_sigaction new_ka, old_ka;
3795 compat_old_sigset_t mask;
3796 compat_uptr_t handler, restorer;
3799 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3800 __get_user(handler, &act->sa_handler) ||
3801 __get_user(restorer, &act->sa_restorer) ||
3802 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3803 __get_user(mask, &act->sa_mask))
3806 #ifdef __ARCH_HAS_KA_RESTORER
3807 new_ka.ka_restorer = NULL;
3809 new_ka.sa.sa_handler = compat_ptr(handler);
3810 new_ka.sa.sa_restorer = compat_ptr(restorer);
3811 siginitset(&new_ka.sa.sa_mask, mask);
3814 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3817 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3818 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3819 &oact->sa_handler) ||
3820 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3821 &oact->sa_restorer) ||
3822 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3823 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3830 #ifdef CONFIG_SGETMASK_SYSCALL
3833 * For backwards compatibility. Functionality superseded by sigprocmask.
3835 SYSCALL_DEFINE0(sgetmask)
3838 return current->blocked.sig[0];
3841 SYSCALL_DEFINE1(ssetmask, int, newmask)
3843 int old = current->blocked.sig[0];
3846 siginitset(&newset, newmask);
3847 set_current_blocked(&newset);
3851 #endif /* CONFIG_SGETMASK_SYSCALL */
3853 #ifdef __ARCH_WANT_SYS_SIGNAL
3855 * For backwards compatibility. Functionality superseded by sigaction.
3857 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3859 struct k_sigaction new_sa, old_sa;
3862 new_sa.sa.sa_handler = handler;
3863 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3864 sigemptyset(&new_sa.sa.sa_mask);
3866 ret = do_sigaction(sig, &new_sa, &old_sa);
3868 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3870 #endif /* __ARCH_WANT_SYS_SIGNAL */
3872 #ifdef __ARCH_WANT_SYS_PAUSE
3874 SYSCALL_DEFINE0(pause)
3876 while (!signal_pending(current)) {
3877 __set_current_state(TASK_INTERRUPTIBLE);
3880 return -ERESTARTNOHAND;
3885 static int sigsuspend(sigset_t *set)
3887 current->saved_sigmask = current->blocked;
3888 set_current_blocked(set);
3890 while (!signal_pending(current)) {
3891 __set_current_state(TASK_INTERRUPTIBLE);
3894 set_restore_sigmask();
3895 return -ERESTARTNOHAND;
3899 * sys_rt_sigsuspend - replace the signal mask with the
3900 * @unewset value until a signal is received
3901 * @unewset: new signal mask value
3902 * @sigsetsize: size of sigset_t type
3904 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3908 /* XXX: Don't preclude handling different sized sigset_t's. */
3909 if (sigsetsize != sizeof(sigset_t))
3912 if (copy_from_user(&newset, unewset, sizeof(newset)))
3914 return sigsuspend(&newset);
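/*
 * Example (user space): the classic race-free wait built on
 * sigsuspend(2): block the signal, test the condition, then atomically
 * unblock and sleep. "flag" is assumed to be a volatile sig_atomic_t
 * set by the SIGUSR1 handler; error handling omitted:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);          returns with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */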
3917 #ifdef CONFIG_COMPAT
3918 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3922 /* XXX: Don't preclude handling different sized sigset_t's. */
3923 if (sigsetsize != sizeof(sigset_t))
3926 if (get_compat_sigset(&newset, unewset))
3928 return sigsuspend(&newset);
3932 #ifdef CONFIG_OLD_SIGSUSPEND
3933 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3936 siginitset(&blocked, mask);
3937 return sigsuspend(&blocked);
3940 #ifdef CONFIG_OLD_SIGSUSPEND3
3941 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3944 siginitset(&blocked, mask);
3945 return sigsuspend(&blocked);
3949 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3954 void __init signals_init(void)
3956 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3957 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3958 != offsetof(struct siginfo, _sifields._pad));
3959 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
3961 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3964 #ifdef CONFIG_KGDB_KDB
3965 #include <linux/kdb.h>
3967 * kdb_send_sig - Allows kdb to send signals without exposing
3968 * signal internals. This function checks if the required locks are
3969 * available before calling the main signal code, to avoid kdb deadlocks.
3972 void kdb_send_sig(struct task_struct *t, int sig)
3974 static struct task_struct *kdb_prev_t;
3976 if (!spin_trylock(&t->sighand->siglock)) {
3977 kdb_printf("Can't do kill command now.\n"
3978 "The sigmask lock is held somewhere else in "
3979 "kernel, try again later\n");
3982 new_t = kdb_prev_t != t;
3984 if (t->state != TASK_RUNNING && new_t) {
3985 spin_unlock(&t->sighand->siglock);
3986 kdb_printf("Process is not RUNNING, sending a signal from "
3987 "kdb risks deadlock\n"
3988 "on the run queue locks. "
3989 "The signal has _not_ been sent.\n"
3990 "Reissue the kill command if you want to risk "
3994 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
3995 spin_unlock(&t->sighand->siglock);
3997 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4000 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4002 #endif /* CONFIG_KGDB_KDB */