/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
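
/*
 * Worked example (editorial note, not from the original source): on a
 * 64-bit architecture with _NSIG == 64, _NSIG_WORDS is 1, so the switch
 * above collapses to
 *
 *	ready = signal->sig[0] &~ blocked->sig[0];
 *
 * i.e. a task has a deliverable signal iff some bit is both pending and
 * not blocked.
 */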
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they should
	 * clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		return ffz(~x) + 1;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (x) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (x)
			sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
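
/*
 * Example of the ordering above (editorial note): if SIGUSR1 and SIGSEGV
 * are both pending and unblocked, the first word is narrowed down to
 * SYNCHRONOUS_MASK, so next_signal() returns SIGSEGV first; the
 * asynchronous SIGUSR1 is only reported once no synchronous (fault-style)
 * signal remains pending.
 */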
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking; @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
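
/*
 * Sketch of the waiting side (editorial note; the actual waiter lives in
 * kernel/ptrace.c): the ptracer blocks on the same bit that is cleared
 * and woken above, roughly
 *
 *	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
 *		    TASK_UNINTERRUPTIBLE);
 *
 * which is why the smp_mb() pairing advised by wake_up_bit() is needed.
 */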
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
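
/*
 * Typical caller (editorial sketch, hypothetical kthread): a kthread that
 * opted in to a signal discards anything still pending in its service
 * loop:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */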
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
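
/*
 * Editorial note: this is what gives a freshly exec'ed image a clean
 * signal table - the exec path calls flush_signal_handlers(current, 0),
 * so caught signals fall back to SIG_DFL while SIG_IGN dispositions are
 * preserved, matching POSIX execve() semantics.
 */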
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked.
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
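
/*
 * Editorial note: SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are
 * not real siginfo pointers but tiny sentinel values (casts of 0, 1 and
 * 2 in the scheduler signal header), which is why the plain pointer
 * comparison "info <= SEND_SIG_FORCED" can classify them before anything
 * dereferences info.
 */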
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
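
/*
 * Example of the rules above (editorial note): an unprivileged user may
 * signal a setuid program it started, because the sender's euid/uid
 * matches the target's saved or real uid in kill_ok_by_cred(); and
 * SIGCONT gets an extra same-session exception, so a shell can resume
 * its own stopped jobs even after they changed credentials.
 */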
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * %PTRACE_SEIZE.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
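
/*
 * Illustration (editorial note): if two SIGUSR1 are generated before the
 * target runs, legacy_queue() is true for the second one and it is
 * silently coalesced - a classic signal is just one pending bit.  Two
 * sigqueue()-style SIGRTMIN+n signals, by contrast, are both queued.
 */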
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	{
		int i;
		unsigned char insn;

		pr_info("code at %08lx: ", regs->ip);
		for (i = 0; i < 16; i++) {
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
		pr_cont("\n");
	}
#endif
	show_regs(regs);
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
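
/*
 * Usage note (editorial): booting with "print-fatal-signals=1" (or, where
 * available, writing to /proc/sys/kernel/print-fatal-signals) enables the
 * print_fatal_signal() dump above for every fatal signal.
 */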
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig && lock_task_sighand(p, &flags)) {
		ret = __send_signal(sig, info, p, 1, 0);
		unlock_task_sighand(p, &flags);
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
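
/*
 * Usage sketch (editorial, hypothetical caller): a driver holding a
 * "struct pid *" reference can signal the owner without touching a raw
 * pid_t:
 *
 *	struct pid *owner = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	kill_pid(owner, SIGIO, 1);	// priv=1: sent by the kernel
 *	put_pid(owner);
 */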
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
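
/*
 * Lifecycle sketch (editorial note): POSIX timers preallocate the
 * sigqueue so that a later expiration can never fail with -EAGAIN:
 *
 *	timer_create()	-> sigqueue_alloc()	(may sleep, may fail)
 *	timer fires	-> send_sigqueue()	(never allocates)
 *	timer_delete()	-> sigqueue_free()
 */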
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * autoreap.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, the ptracer may attach in between; however, this is
		 * for group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2138 int get_signal(struct ksignal *ksig)
2140 struct sighand_struct *sighand = current->sighand;
2141 struct signal_struct *signal = current->signal;
2144 if (unlikely(current->task_works))
2147 if (unlikely(uprobe_deny_signal()))
2151 * Do this once, we can't return to user-mode if freezing() == T.
2152 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2153 * thus do not need another check after return.
2158 spin_lock_irq(&sighand->siglock);
2160 * Every stopped thread goes here after wakeup. Check to see if
2161 * we should notify the parent, prepare_signal(SIGCONT) encodes
2162 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2164 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2167 if (signal->flags & SIGNAL_CLD_CONTINUED)
2168 why = CLD_CONTINUED;
2172 signal->flags &= ~SIGNAL_CLD_MASK;
2174 spin_unlock_irq(&sighand->siglock);
2177 * Notify the parent that we're continuing. This event is
2178 * always per-process and doesn't make whole lot of sense
2179 * for ptracers, who shouldn't consume the state via
2180 * wait(2) either, but, for backward compatibility, notify
2181 * the ptracer of the group leader too unless it's gonna be
2184 read_lock(&tasklist_lock);
2185 do_notify_parent_cldstop(current, false, why);
2187 if (ptrace_reparented(current->group_leader))
2188 do_notify_parent_cldstop(current->group_leader,
2190 read_unlock(&tasklist_lock);
2196 struct k_sigaction *ka;
2198 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2202 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2204 spin_unlock_irq(&sighand->siglock);
2208 signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
2211 break; /* will return 0 */
2213 if (unlikely(current->ptrace) && signr != SIGKILL) {
2214 signr = ptrace_signal(signr, &ksig->info);
2219 ka = &sighand->action[signr-1];
2221 /* Trace actually delivered signals. */
2222 trace_signal_deliver(signr, &ksig->info, ka);
2224 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2226 if (ka->sa.sa_handler != SIG_DFL) {
2227 /* Run the handler. */
2230 if (ka->sa.sa_flags & SA_ONESHOT)
2231 ka->sa.sa_handler = SIG_DFL;
2233 break; /* will return non-zero "signr" value */
2237 * Now we are doing the default action for this signal.
2239 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2243 * Global init gets no signals it doesn't want.
2244 * Container-init gets no signals it doesn't want from same
2247 * Note that if global/container-init sees a sig_kernel_only()
2248 * signal here, the signal must have been generated internally
2249 * or must have come from an ancestor namespace. In either
2250 * case, the signal cannot be dropped.
2252 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2253 !sig_kernel_only(signr))
2256 if (sig_kernel_stop(signr)) {
2258 * The default action is to stop all threads in
2259 * the thread group. The job control signals
2260 * do nothing in an orphaned pgrp, but SIGSTOP
2261 * always works. Note that siglock needs to be
2262 * dropped during the call to is_orphaned_pgrp()
2263 * because of lock ordering with tasklist_lock.
2264 * This allows an intervening SIGCONT to be posted.
2265 * We need to check for that and bail out if necessary.
2267 if (signr != SIGSTOP) {
2268 spin_unlock_irq(&sighand->siglock);
2270 /* signals can be posted during this window */
2272 if (is_current_pgrp_orphaned())
2273 goto relock;
2275 spin_lock_irq(&sighand->siglock);
2278 if (likely(do_signal_stop(ksig->info.si_signo))) {
2279 /* It released the siglock. */
2280 goto relock;
2284 * We didn't actually stop, due to a race
2285 * with SIGCONT or something like that.
2287 continue;
2290 spin_unlock_irq(&sighand->siglock);
2293 * Anything else is fatal, maybe with a core dump.
2295 current->flags |= PF_SIGNALED;
2297 if (sig_kernel_coredump(signr)) {
2298 if (print_fatal_signals)
2299 print_fatal_signal(ksig->info.si_signo);
2300 proc_coredump_connector(current);
2302 * If it was able to dump core, this kills all
2303 * other threads in the group and synchronizes with
2304 * their demise. If we lost the race with another
2305 * thread getting here, it set group_exit_code
2306 * first and our do_group_exit call below will use
2307 * that value and ignore the one we pass it.
2309 do_coredump(&ksig->info);
2313 * Death signals, no core dump.
2315 do_group_exit(ksig->info.si_signo);
2318 spin_unlock_irq(&sighand->siglock);
2320 ksig->sig = signr;
2321 return ksig->sig > 0;
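/*
 * Illustrative sketch, not part of this file: an architecture's
 * signal-delivery path typically drives get_signal() and
 * signal_setup_done() roughly as below. handle_signal() stands in for
 * the arch-specific frame setup and is an assumption here, as is the
 * use of TIF_SINGLESTEP for the stepping argument.
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Build the user-mode handler frame (arch code).
 *			int failed = handle_signal(&ksig, regs);
 *			signal_setup_done(failed, &ksig,
 *					  test_thread_flag(TIF_SINGLESTEP));
 *			return;
 *		}
 *		// No handler was run; restore the saved sigmask, if any.
 *		restore_saved_sigmask();
 *	}
 */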
2325 * signal_delivered - called after a signal has successfully been delivered
2326 * @ksig: kernel signal struct
2327 * @stepping: nonzero if debugger single-step or block-step in use
2329 * This function should be called when a signal has successfully been
2330 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2331 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2332 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2334 static void signal_delivered(struct ksignal *ksig, int stepping)
2336 sigset_t blocked;
2338 /* A signal was successfully delivered, and the
2339 saved sigmask was stored on the signal frame,
2340 and will be restored by sigreturn. So we can
2341 simply clear the restore sigmask flag. */
2342 clear_restore_sigmask();
2344 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2345 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2346 sigaddset(&blocked, ksig->sig);
2347 set_current_blocked(&blocked);
2348 tracehook_signal_handler(stepping);
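/*
 * Illustrative userspace sketch of the masking rule above (handler and
 * signal choices are assumptions): while a handler installed without
 * SA_NODEFER runs, the delivered signal and everything in sa_mask are
 * blocked; with SA_NODEFER, only sa_mask is.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void handler(int sig)
 *	{
 *		// Here SIGUSR1 (the delivered signal) and SIGUSR2
 *		// (from sa_mask) are blocked until we return.
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = handler;
 *		sigemptyset(&sa.sa_mask);
 *		sigaddset(&sa.sa_mask, SIGUSR2);
 *		sa.sa_flags = 0;		// no SA_NODEFER
 *		sigaction(SIGUSR1, &sa, NULL);
 *		raise(SIGUSR1);
 *		return 0;
 *	}
 */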
2351 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2353 if (failed)
2354 force_sigsegv(ksig->sig, current);
2355 else
2356 signal_delivered(ksig, stepping);
2360 * It could be that complete_signal() picked us to notify about the
2361 * group-wide signal. Other threads should be notified now to take
2362 * the shared signals in @which since we will not.
2364 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2366 sigset_t retarget;
2367 struct task_struct *t;
2369 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2370 if (sigisemptyset(&retarget))
2371 return;
2373 t = tsk;
2374 while_each_thread(tsk, t) {
2375 if (t->flags & PF_EXITING)
2376 continue;
2378 if (!has_pending_signals(&retarget, &t->blocked))
2379 continue;
2380 /* Remove the signals this thread can handle. */
2381 sigandsets(&retarget, &retarget, &t->blocked);
2383 if (!signal_pending(t))
2384 signal_wake_up(t, 0);
2386 if (sigisemptyset(&retarget))
2387 break;
2391 void exit_signals(struct task_struct *tsk)
2393 int group_stop = 0;
2394 sigset_t unblocked;
2397 * @tsk is about to have PF_EXITING set - lock out users which
2398 * expect stable threadgroup.
2400 cgroup_threadgroup_change_begin(tsk);
2402 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2403 tsk->flags |= PF_EXITING;
2404 cgroup_threadgroup_change_end(tsk);
2405 return;
2408 spin_lock_irq(&tsk->sighand->siglock);
2410 * From now this task is not visible for group-wide signals,
2411 * see wants_signal(), do_signal_stop().
2413 tsk->flags |= PF_EXITING;
2415 cgroup_threadgroup_change_end(tsk);
2417 if (!signal_pending(tsk))
2418 goto out;
2420 unblocked = tsk->blocked;
2421 signotset(&unblocked);
2422 retarget_shared_pending(tsk, &unblocked);
2424 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2425 task_participate_group_stop(tsk))
2426 group_stop = CLD_STOPPED;
2427 out:
2428 spin_unlock_irq(&tsk->sighand->siglock);
2431 * If group stop has completed, deliver the notification. This
2432 * should always go to the real parent of the group leader.
2434 if (unlikely(group_stop)) {
2435 read_lock(&tasklist_lock);
2436 do_notify_parent_cldstop(tsk, false, group_stop);
2437 read_unlock(&tasklist_lock);
2441 EXPORT_SYMBOL(recalc_sigpending);
2442 EXPORT_SYMBOL_GPL(dequeue_signal);
2443 EXPORT_SYMBOL(flush_signals);
2444 EXPORT_SYMBOL(force_sig);
2445 EXPORT_SYMBOL(send_sig);
2446 EXPORT_SYMBOL(send_sig_info);
2447 EXPORT_SYMBOL(sigprocmask);
2450 * System call entry points.
2454 * sys_restart_syscall - restart a system call
2456 SYSCALL_DEFINE0(restart_syscall)
2458 struct restart_block *restart = &current->restart_block;
2459 return restart->fn(restart);
2462 long do_no_restart_syscall(struct restart_block *param)
2464 return -EINTR;
2467 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2469 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2470 sigset_t newblocked;
2471 /* A set of now blocked but previously unblocked signals. */
2472 sigandnsets(&newblocked, newset, &current->blocked);
2473 retarget_shared_pending(tsk, &newblocked);
2475 tsk->blocked = *newset;
2476 recalc_sigpending();
2480 * set_current_blocked - change current->blocked mask
2481 * @newset: new mask
2483 * It is wrong to change ->blocked directly, this helper should be used
2484 * to ensure the process can't miss a shared signal we are going to block.
2486 void set_current_blocked(sigset_t *newset)
2488 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2489 __set_current_blocked(newset);
2492 void __set_current_blocked(const sigset_t *newset)
2494 struct task_struct *tsk = current;
2497 * In case the signal mask hasn't changed, there is nothing we need
2498 * to do. The current->blocked shouldn't be modified by other task.
2500 if (sigequalsets(&tsk->blocked, newset))
2501 return;
2503 spin_lock_irq(&tsk->sighand->siglock);
2504 __set_task_blocked(tsk, newset);
2505 spin_unlock_irq(&tsk->sighand->siglock);
2509 * This is also useful for kernel threads that want to temporarily
2510 * (or permanently) block certain signals.
2512 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2513 * interface happily blocks "unblockable" signals like SIGKILL
2514 * and friends.
2516 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2518 struct task_struct *tsk = current;
2519 sigset_t newset;
2521 /* Lockless, only current can change ->blocked, never from irq */
2522 if (oldset)
2523 *oldset = tsk->blocked;
2525 switch (how) {
2526 case SIG_BLOCK:
2527 sigorsets(&newset, &tsk->blocked, set);
2528 break;
2529 case SIG_UNBLOCK:
2530 sigandnsets(&newset, &tsk->blocked, set);
2531 break;
2532 case SIG_SETMASK:
2533 newset = *set;
2534 break;
2535 default:
2536 return -EINVAL;
2539 __set_current_blocked(&newset);
2540 return 0;
2544 * sys_rt_sigprocmask - change the list of currently blocked signals
2545 * @how: whether to add, remove, or set signals
2546 * @nset: new set of blocked signals, if non-null
2547 * @oset: previous value of signal mask if non-null
2548 * @sigsetsize: size of sigset_t type
2550 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2551 sigset_t __user *, oset, size_t, sigsetsize)
2553 sigset_t old_set, new_set;
2554 int error;
2556 /* XXX: Don't preclude handling different sized sigset_t's. */
2557 if (sigsetsize != sizeof(sigset_t))
2558 return -EINVAL;
2560 old_set = current->blocked;
2562 if (nset) {
2563 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2564 return -EFAULT;
2565 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2567 error = sigprocmask(how, &new_set, NULL);
2568 if (error)
2569 return error;
2572 if (oset) {
2573 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2574 return -EFAULT;
2577 return 0;
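/*
 * Illustrative userspace sketch (the signal choice is an assumption):
 * the libc sigprocmask(3) wrapper reaches the syscall above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &set, &old) == 0)
 *			printf("SIGINT is now blocked\n");
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore
 *		return 0;
 *	}
 */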
2580 #ifdef CONFIG_COMPAT
2581 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2582 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2584 #ifdef __BIG_ENDIAN
2585 sigset_t old_set = current->blocked;
2587 /* XXX: Don't preclude handling different sized sigset_t's. */
2588 if (sigsetsize != sizeof(sigset_t))
2589 return -EINVAL;
2591 if (nset) {
2592 compat_sigset_t new32;
2593 sigset_t new_set;
2594 int error;
2595 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2596 return -EFAULT;
2598 sigset_from_compat(&new_set, &new32);
2599 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2601 error = sigprocmask(how, &new_set, NULL);
2602 if (error)
2603 return error;
2605 if (oset) {
2606 compat_sigset_t old32;
2607 sigset_to_compat(&old32, &old_set);
2608 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2609 return -EFAULT;
2611 return 0;
2612 #else
2613 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2614 (sigset_t __user *)oset, sigsetsize);
2615 #endif
2619 static int do_sigpending(void *set, unsigned long sigsetsize)
2621 if (sigsetsize > sizeof(sigset_t))
2622 return -EINVAL;
2624 spin_lock_irq(&current->sighand->siglock);
2625 sigorsets(set, &current->pending.signal,
2626 &current->signal->shared_pending.signal);
2627 spin_unlock_irq(&current->sighand->siglock);
2629 /* Outside the lock because only this thread touches it. */
2630 sigandsets(set, &current->blocked, set);
2631 return 0;
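/*
 * Illustrative userspace sketch (the signal choice is an assumption):
 * a raised-but-blocked signal shows up via sigpending(2), matching the
 * pending-and-blocked intersection computed above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *		raise(SIGTERM);			// queued, not delivered
 *		sigpending(&pending);
 *		if (sigismember(&pending, SIGTERM))
 *			printf("SIGTERM is pending\n");
 *		return 0;
 *	}
 */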
2635 * sys_rt_sigpending - examine a pending signal that has been raised
2636 * while blocked
2637 * @uset: stores pending signals
2638 * @sigsetsize: size of sigset_t type or larger
2640 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2642 sigset_t set;
2643 int err = do_sigpending(&set, sigsetsize);
2644 if (!err && copy_to_user(uset, &set, sigsetsize))
2645 err = -EFAULT;
2646 return err;
2649 #ifdef CONFIG_COMPAT
2650 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2651 compat_size_t, sigsetsize)
2653 #ifdef __BIG_ENDIAN
2654 sigset_t set;
2655 int err = do_sigpending(&set, sigsetsize);
2656 if (!err) {
2657 compat_sigset_t set32;
2658 sigset_to_compat(&set32, &set);
2659 /* we can get here only if sigsetsize <= sizeof(set) */
2660 if (copy_to_user(uset, &set32, sigsetsize))
2661 err = -EFAULT;
2663 return err;
2664 #else
2665 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2666 #endif
2670 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2672 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2674 int err;
2676 if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2677 return -EFAULT;
2678 if (from->si_code < 0)
2679 return __copy_to_user(to, from, sizeof(siginfo_t))
2680 ? -EFAULT : 0;
2682 * If you change siginfo_t structure, please be sure
2683 * this code is fixed accordingly.
2684 * Please remember to update the signalfd_copyinfo() function
2685 * inside fs/signalfd.c too, in case siginfo_t changes.
2686 * It should never copy any pad contained in the structure
2687 * to avoid security leaks, but must copy the generic
2688 * 3 ints plus the relevant union member.
2690 err = __put_user(from->si_signo, &to->si_signo);
2691 err |= __put_user(from->si_errno, &to->si_errno);
2692 err |= __put_user((short)from->si_code, &to->si_code);
2693 switch (from->si_code & __SI_MASK) {
2694 case __SI_KILL:
2695 err |= __put_user(from->si_pid, &to->si_pid);
2696 err |= __put_user(from->si_uid, &to->si_uid);
2697 break;
2698 case __SI_TIMER:
2699 err |= __put_user(from->si_tid, &to->si_tid);
2700 err |= __put_user(from->si_overrun, &to->si_overrun);
2701 err |= __put_user(from->si_ptr, &to->si_ptr);
2702 break;
2703 case __SI_POLL:
2704 err |= __put_user(from->si_band, &to->si_band);
2705 err |= __put_user(from->si_fd, &to->si_fd);
2706 break;
2707 case __SI_FAULT:
2708 err |= __put_user(from->si_addr, &to->si_addr);
2709 #ifdef __ARCH_SI_TRAPNO
2710 err |= __put_user(from->si_trapno, &to->si_trapno);
2711 #endif
2712 #ifdef BUS_MCEERR_AO
2714 * Other callers might not initialize the si_lsb field,
2715 * so check explicitly for the right codes here.
2717 if (from->si_signo == SIGBUS &&
2718 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2719 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2720 #endif
2721 #ifdef SEGV_BNDERR
2722 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2723 err |= __put_user(from->si_lower, &to->si_lower);
2724 err |= __put_user(from->si_upper, &to->si_upper);
2726 #endif
2727 #ifdef SEGV_PKUERR
2728 if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2729 err |= __put_user(from->si_pkey, &to->si_pkey);
2730 #endif
2731 break;
2732 case __SI_CHLD:
2733 err |= __put_user(from->si_pid, &to->si_pid);
2734 err |= __put_user(from->si_uid, &to->si_uid);
2735 err |= __put_user(from->si_status, &to->si_status);
2736 err |= __put_user(from->si_utime, &to->si_utime);
2737 err |= __put_user(from->si_stime, &to->si_stime);
2738 break;
2739 case __SI_RT: /* This is not generated by the kernel as of now. */
2740 case __SI_MESGQ: /* But this is */
2741 err |= __put_user(from->si_pid, &to->si_pid);
2742 err |= __put_user(from->si_uid, &to->si_uid);
2743 err |= __put_user(from->si_ptr, &to->si_ptr);
2744 break;
2745 #ifdef __ARCH_SIGSYS
2746 case __SI_SYS:
2747 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2748 err |= __put_user(from->si_syscall, &to->si_syscall);
2749 err |= __put_user(from->si_arch, &to->si_arch);
2750 break;
2751 #endif
2752 default: /* this is just in case for now ... */
2753 err |= __put_user(from->si_pid, &to->si_pid);
2754 err |= __put_user(from->si_uid, &to->si_uid);
2755 break;
2758 return err;
2763 * do_sigtimedwait - wait for queued signals specified in @which
2764 * @which: queued signals to wait for
2765 * @info: if non-null, the signal's siginfo is returned here
2766 * @ts: upper bound on process time suspension
2768 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2769 const struct timespec *ts)
2771 ktime_t *to = NULL, timeout = KTIME_MAX;
2772 struct task_struct *tsk = current;
2773 sigset_t mask = *which;
2774 int sig, ret = 0;
2776 if (ts) {
2777 if (!timespec_valid(ts))
2778 return -EINVAL;
2779 timeout = timespec_to_ktime(*ts);
2780 to = &timeout;
2784 * Invert the set of allowed signals to get those we want to block.
2786 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2787 signotset(&mask);
2789 spin_lock_irq(&tsk->sighand->siglock);
2790 sig = dequeue_signal(tsk, &mask, info);
2791 if (!sig && timeout) {
2793 * None ready, temporarily unblock those we're interested in
2794 * while we are sleeping, so that we'll be awakened when
2795 * they arrive. Unblocking is always fine; we can avoid
2796 * set_current_blocked().
2798 tsk->real_blocked = tsk->blocked;
2799 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2800 recalc_sigpending();
2801 spin_unlock_irq(&tsk->sighand->siglock);
2803 __set_current_state(TASK_INTERRUPTIBLE);
2804 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2805 HRTIMER_MODE_REL);
2806 spin_lock_irq(&tsk->sighand->siglock);
2807 __set_task_blocked(tsk, &tsk->real_blocked);
2808 sigemptyset(&tsk->real_blocked);
2809 sig = dequeue_signal(tsk, &mask, info);
2811 spin_unlock_irq(&tsk->sighand->siglock);
2813 if (sig)
2814 return sig;
2815 return ret ? -EINTR : -EAGAIN;
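/*
 * Illustrative userspace sketch (signal and timeout are assumptions):
 * sigtimedwait(2) lands in do_sigtimedwait() above; the -EAGAIN case
 * surfaces as errno == EAGAIN on timeout.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { 5, 0 };	// 5 second upper bound
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// block it first
 *		if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *			printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 *		return 0;
 *	}
 */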
2819 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2820 * in @uthese
2821 * @uthese: queued signals to wait for
2822 * @uinfo: if non-null, the signal's siginfo is returned here
2823 * @uts: upper bound on process time suspension
2824 * @sigsetsize: size of sigset_t type
2826 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2827 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2828 size_t, sigsetsize)
2830 sigset_t these;
2831 struct timespec ts;
2832 siginfo_t info;
2833 long ret;
2835 /* XXX: Don't preclude handling different sized sigset_t's. */
2836 if (sigsetsize != sizeof(sigset_t))
2839 if (copy_from_user(&these, uthese, sizeof(these)))
2840 return -EFAULT;
2842 if (uts) {
2843 if (copy_from_user(&ts, uts, sizeof(ts)))
2844 return -EFAULT;
2847 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2849 if (ret > 0 && uinfo) {
2850 if (copy_siginfo_to_user(uinfo, &info))
2851 ret = -EFAULT;
2854 return ret;
2858 * sys_kill - send a signal to a process
2859 * @pid: the PID of the process
2860 * @sig: signal to be sent
2862 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2864 struct siginfo info;
2866 info.si_signo = sig;
2867 info.si_errno = 0;
2868 info.si_code = SI_USER;
2869 info.si_pid = task_tgid_vnr(current);
2870 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2872 return kill_something_info(sig, &info, pid);
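/*
 * Illustrative userspace sketch (the target pid is an assumption):
 * with sig == 0 only the existence and permission checks run and
 * nothing is delivered.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = 1234;		// hypothetical target
 *
 *		if (kill(pid, 0) == 0)
 *			printf("pid %d exists and is signalable\n", (int)pid);
 *		else if (errno == ESRCH)
 *			printf("no such process\n");
 *		return 0;
 *	}
 */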
2875 static int
2876 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2878 struct task_struct *p;
2879 int error = -ESRCH;
2881 rcu_read_lock();
2882 p = find_task_by_vpid(pid);
2883 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2884 error = check_kill_permission(sig, info, p);
2886 * The null signal is a permissions and process existence
2887 * probe. No signal is actually delivered.
2889 if (!error && sig) {
2890 error = do_send_sig_info(sig, info, p, false);
2892 * If lock_task_sighand() failed we pretend the task
2893 * dies after receiving the signal. The window is tiny,
2894 * and the signal is private anyway.
2896 if (unlikely(error == -ESRCH))
2897 error = 0;
2900 rcu_read_unlock();
2902 return error;
2905 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2907 struct siginfo info = {};
2909 info.si_signo = sig;
2910 info.si_errno = 0;
2911 info.si_code = SI_TKILL;
2912 info.si_pid = task_tgid_vnr(current);
2913 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2915 return do_send_specific(tgid, pid, sig, &info);
2919 * sys_tgkill - send signal to one specific thread
2920 * @tgid: the thread group ID of the thread
2921 * @pid: the PID of the thread
2922 * @sig: signal to be sent
2924 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2925 * exists but no longer belongs to the target process. This
2926 * method solves the problem of threads exiting and PIDs getting reused.
2928 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2930 /* This is only valid for single tasks */
2931 if (pid <= 0 || tgid <= 0)
2932 return -EINVAL;
2934 return do_tkill(tgid, pid, sig);
2938 * sys_tkill - send signal to one specific task
2939 * @pid: the PID of the task
2940 * @sig: signal to be sent
2942 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2944 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2946 /* This is only valid for single tasks */
2947 if (pid <= 0)
2948 return -EINVAL;
2950 return do_tkill(0, pid, sig);
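/*
 * Illustrative userspace sketch: aiming a signal at one specific
 * thread. Older C libraries expose tgkill only through syscall(2);
 * the null signal is used here so nothing is actually delivered.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t tgid = getpid();
 *		pid_t tid = (pid_t)syscall(SYS_gettid);
 *
 *		// Same checks as tgkill(tgid, tid, sig) with sig == 0.
 *		return (int)syscall(SYS_tgkill, tgid, tid, 0);
 *	}
 */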
2953 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2955 /* Not even root can pretend to send signals from the kernel.
2956 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2958 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2959 (task_pid_vnr(current) != pid))
2960 return -EPERM;
2962 info->si_signo = sig;
2964 /* POSIX.1b doesn't mention process groups. */
2965 return kill_proc_info(sig, info, pid);
2969 * sys_rt_sigqueueinfo - send signal information to a process
2970 * @pid: the PID of the thread
2971 * @sig: signal to be sent
2972 * @uinfo: signal info to be sent
2974 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2975 siginfo_t __user *, uinfo)
2977 siginfo_t info;
2978 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2979 return -EFAULT;
2980 return do_rt_sigqueueinfo(pid, sig, &info);
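/*
 * Illustrative userspace sketch (pid and payload are assumptions):
 * sigqueue(3) builds a siginfo with si_code == SI_QUEUE and enters the
 * kernel through rt_sigqueueinfo; an SA_SIGINFO handler in the target
 * can read the payload from si_value.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int main(void)
 *	{
 *		union sigval value;
 *		pid_t pid = 1234;		// hypothetical target
 *
 *		value.sival_int = 42;		// arbitrary payload
 *		return sigqueue(pid, SIGUSR1, value);
 *	}
 */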
2983 #ifdef CONFIG_COMPAT
2984 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2985 compat_pid_t, pid,
2986 int, sig,
2987 struct compat_siginfo __user *, uinfo)
2989 siginfo_t info = {};
2990 int ret = copy_siginfo_from_user32(&info, uinfo);
2991 if (unlikely(ret))
2992 return ret;
2993 return do_rt_sigqueueinfo(pid, sig, &info);
2997 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2999 /* This is only valid for single tasks */
3000 if (pid <= 0 || tgid <= 0)
3001 return -EINVAL;
3003 /* Not even root can pretend to send signals from the kernel.
3004 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3006 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3007 (task_pid_vnr(current) != pid))
3008 return -EPERM;
3010 info->si_signo = sig;
3012 return do_send_specific(tgid, pid, sig, info);
3015 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3016 siginfo_t __user *, uinfo)
3019 siginfo_t info;
3020 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3021 return -EFAULT;
3023 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3026 #ifdef CONFIG_COMPAT
3027 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3028 compat_pid_t, tgid,
3029 compat_pid_t, pid,
3030 int, sig,
3031 struct compat_siginfo __user *, uinfo)
3033 siginfo_t info = {};
3035 if (copy_siginfo_from_user32(&info, uinfo))
3036 return -EFAULT;
3037 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3042 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3044 void kernel_sigaction(int sig, __sighandler_t action)
3046 spin_lock_irq(&current->sighand->siglock);
3047 current->sighand->action[sig - 1].sa.sa_handler = action;
3048 if (action == SIG_IGN) {
3049 sigset_t mask;
3051 sigemptyset(&mask);
3052 sigaddset(&mask, sig);
3054 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3055 flush_sigqueue_mask(&mask, &current->pending);
3056 recalc_sigpending();
3058 spin_unlock_irq(&current->sighand->siglock);
3060 EXPORT_SYMBOL(kernel_sigaction);
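/*
 * Illustrative kernel-side sketch (the thread function is
 * hypothetical): allow_signal() and disallow_signal() from
 * <linux/signal.h> are thin wrappers around kernel_sigaction().
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);		// wraps kernel_sigaction()
 *
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current))
 *				break;		// someone sent us SIGTERM
 *		}
 *		return 0;
 *	}
 */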
3062 void __weak sigaction_compat_abi(struct k_sigaction *act,
3063 struct k_sigaction *oact)
3067 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3069 struct task_struct *p = current, *t;
3070 struct k_sigaction *k;
3071 sigset_t mask;
3073 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3074 return -EINVAL;
3076 k = &p->sighand->action[sig-1];
3078 spin_lock_irq(&p->sighand->siglock);
3079 if (oact)
3080 *oact = *k;
3082 sigaction_compat_abi(act, oact);
3084 if (act) {
3085 sigdelsetmask(&act->sa.sa_mask,
3086 sigmask(SIGKILL) | sigmask(SIGSTOP));
3087 *k = *act;
3090 * "Setting a signal action to SIG_IGN for a signal that is
3091 * pending shall cause the pending signal to be discarded,
3092 * whether or not it is blocked."
3094 * "Setting a signal action to SIG_DFL for a signal that is
3095 * pending and whose default action is to ignore the signal
3096 * (for example, SIGCHLD), shall cause the pending signal to
3097 * be discarded, whether or not it is blocked"
3099 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3100 sigemptyset(&mask);
3101 sigaddset(&mask, sig);
3102 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3103 for_each_thread(p, t)
3104 flush_sigqueue_mask(&mask, &t->pending);
3108 spin_unlock_irq(&p->sighand->siglock);
3110 return 0;
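/*
 * Illustrative userspace sketch of the POSIX rule quoted above (the
 * signal choice is an assumption): installing SIG_IGN discards a
 * pending signal even while it is blocked.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *		struct sigaction sa;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *		raise(SIGUSR1);			// pending and blocked
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = SIG_IGN;
 *		sigaction(SIGUSR1, &sa, NULL);	// flushes it from the queues
 *
 *		sigpending(&pending);
 *		printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
 *		return 0;
 *	}
 */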
3112 static int
3113 do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3115 stack_t oss;
3116 int error;
3118 oss.ss_sp = (void __user *) current->sas_ss_sp;
3119 oss.ss_size = current->sas_ss_size;
3120 oss.ss_flags = sas_ss_flags(sp) |
3121 (current->sas_ss_flags & SS_FLAG_BITS);
3130 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3131 goto out;
3132 error = __get_user(ss_sp, &uss->ss_sp) |
3133 __get_user(ss_flags, &uss->ss_flags) |
3134 __get_user(ss_size, &uss->ss_size);
3135 if (error)
3136 goto out;
3138 error = -EPERM;
3139 if (on_sig_stack(sp))
3140 goto out;
3142 ss_mode = ss_flags & ~SS_FLAG_BITS;
3143 error = -EINVAL;
3144 if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3145 ss_mode != 0)
3146 goto out;
3148 if (ss_mode == SS_DISABLE) {
3149 ss_size = 0;
3150 ss_sp = NULL;
3151 } else {
3152 error = -ENOMEM;
3153 if (ss_size < MINSIGSTKSZ)
3154 goto out;
3157 current->sas_ss_sp = (unsigned long) ss_sp;
3158 current->sas_ss_size = ss_size;
3159 current->sas_ss_flags = ss_flags;
3165 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3166 goto out;
3167 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3168 __put_user(oss.ss_size, &uoss->ss_size) |
3169 __put_user(oss.ss_flags, &uoss->ss_flags);
3172 out:
3173 return error;
3175 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3177 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3180 int restore_altstack(const stack_t __user *uss)
3182 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3183 /* squash all but EFAULT for now */
3184 return err == -EFAULT ? err : 0;
3187 int __save_altstack(stack_t __user *uss, unsigned long sp)
3189 struct task_struct *t = current;
3190 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3191 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3192 __put_user(t->sas_ss_size, &uss->ss_size);
3193 if (err)
3194 return err;
3195 if (t->sas_ss_flags & SS_AUTODISARM)
3196 sas_ss_reset(t);
3197 return 0;
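/*
 * Illustrative userspace sketch (the handler is an assumption): a
 * stack registered via sigaltstack(2) is only used by handlers
 * installed with SA_ONSTACK, e.g. to survive a stack-overflow SIGSEGV.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static void on_segv(int sig) { _Exit(1); }
 *
 *	int main(void)
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		sigaltstack(&ss, NULL);
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = on_segv;
 *		sa.sa_flags = SA_ONSTACK;	// run on the alternate stack
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		return 0;
 *	}
 */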
3200 #ifdef CONFIG_COMPAT
3201 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3202 const compat_stack_t __user *, uss_ptr,
3203 compat_stack_t __user *, uoss_ptr)
3205 stack_t uss, uoss;
3206 int ret;
3208 if (uss_ptr) {
3210 compat_stack_t uss32;
3212 memset(&uss, 0, sizeof(stack_t));
3213 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3214 return -EFAULT;
3215 uss.ss_sp = compat_ptr(uss32.ss_sp);
3216 uss.ss_flags = uss32.ss_flags;
3217 uss.ss_size = uss32.ss_size;
3221 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3222 (stack_t __force __user *) &uoss,
3223 compat_user_stack_pointer());
3225 if (ret >= 0 && uoss_ptr) {
3226 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3227 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3228 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3229 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3230 return -EFAULT;
3232 return ret;
3235 int compat_restore_altstack(const compat_stack_t __user *uss)
3237 int err = compat_sys_sigaltstack(uss, NULL);
3238 /* squash all but -EFAULT for now */
3239 return err == -EFAULT ? err : 0;
3242 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3244 int err;
3245 struct task_struct *t = current;
3246 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3247 &uss->ss_sp) |
3248 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3249 __put_user(t->sas_ss_size, &uss->ss_size);
3250 if (err)
3251 return err;
3252 if (t->sas_ss_flags & SS_AUTODISARM)
3253 sas_ss_reset(t);
3254 return 0;
3258 #ifdef __ARCH_WANT_SYS_SIGPENDING
3261 * sys_sigpending - examine pending signals
3262 * @set: where the mask of pending signals is returned
3264 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3266 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3271 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3273 * sys_sigprocmask - examine and change blocked signals
3274 * @how: whether to add, remove, or set signals
3275 * @nset: signals to add or remove (if non-null)
3276 * @oset: previous value of signal mask if non-null
3278 * Some platforms have their own version with special arguments;
3279 * others support only sys_rt_sigprocmask.
3282 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3283 old_sigset_t __user *, oset)
3285 old_sigset_t old_set, new_set;
3286 sigset_t new_blocked;
3288 old_set = current->blocked.sig[0];
3290 if (nset) {
3291 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3292 return -EFAULT;
3294 new_blocked = current->blocked;
3296 switch (how) {
3297 case SIG_BLOCK:
3298 sigaddsetmask(&new_blocked, new_set);
3299 break;
3300 case SIG_UNBLOCK:
3301 sigdelsetmask(&new_blocked, new_set);
3302 break;
3303 case SIG_SETMASK:
3304 new_blocked.sig[0] = new_set;
3305 break;
3306 default:
3307 return -EINVAL;
3310 set_current_blocked(&new_blocked);
3313 if (oset) {
3314 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3315 return -EFAULT;
3318 return 0;
3320 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3322 #ifndef CONFIG_ODD_RT_SIGACTION
3324 * sys_rt_sigaction - alter an action taken by a process
3325 * @sig: signal to be sent
3326 * @act: new sigaction
3327 * @oact: used to save the previous sigaction
3328 * @sigsetsize: size of sigset_t type
3330 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3331 const struct sigaction __user *, act,
3332 struct sigaction __user *, oact,
3333 size_t, sigsetsize)
3335 struct k_sigaction new_sa, old_sa;
3336 int ret;
3338 /* XXX: Don't preclude handling different sized sigset_t's. */
3339 if (sigsetsize != sizeof(sigset_t))
3340 return -EINVAL;
3342 if (act) {
3343 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3344 return -EFAULT;
3347 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3349 if (!ret && oact) {
3350 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3351 return -EFAULT;
3354 return ret;
3356 #ifdef CONFIG_COMPAT
3357 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3358 const struct compat_sigaction __user *, act,
3359 struct compat_sigaction __user *, oact,
3360 compat_size_t, sigsetsize)
3362 struct k_sigaction new_ka, old_ka;
3363 compat_sigset_t mask;
3364 #ifdef __ARCH_HAS_SA_RESTORER
3365 compat_uptr_t restorer;
3369 /* XXX: Don't preclude handling different sized sigset_t's. */
3370 if (sigsetsize != sizeof(compat_sigset_t))
3371 return -EINVAL;
3373 if (act) {
3374 compat_uptr_t handler;
3375 ret = get_user(handler, &act->sa_handler);
3376 new_ka.sa.sa_handler = compat_ptr(handler);
3377 #ifdef __ARCH_HAS_SA_RESTORER
3378 ret |= get_user(restorer, &act->sa_restorer);
3379 new_ka.sa.sa_restorer = compat_ptr(restorer);
3381 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3382 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3383 if (ret)
3384 return -EFAULT;
3385 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3388 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3389 if (!ret && oact) {
3390 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3391 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3392 &oact->sa_handler);
3393 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3394 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3395 #ifdef __ARCH_HAS_SA_RESTORER
3396 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3397 &oact->sa_restorer);
3398 #endif
3400 return ret;
3403 #endif /* !CONFIG_ODD_RT_SIGACTION */
3405 #ifdef CONFIG_OLD_SIGACTION
3406 SYSCALL_DEFINE3(sigaction, int, sig,
3407 const struct old_sigaction __user *, act,
3408 struct old_sigaction __user *, oact)
3410 struct k_sigaction new_ka, old_ka;
3411 int ret;
3413 if (act) {
3414 old_sigset_t mask;
3415 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3416 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3417 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3418 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3419 __get_user(mask, &act->sa_mask))
3420 return -EFAULT;
3421 #ifdef __ARCH_HAS_KA_RESTORER
3422 new_ka.ka_restorer = NULL;
3424 siginitset(&new_ka.sa.sa_mask, mask);
3427 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3429 if (!ret && oact) {
3430 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3431 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3432 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3433 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3434 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3435 return -EFAULT;
3438 return ret;
3441 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3442 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3443 const struct compat_old_sigaction __user *, act,
3444 struct compat_old_sigaction __user *, oact)
3446 struct k_sigaction new_ka, old_ka;
3447 int ret;
3448 compat_old_sigset_t mask;
3449 compat_uptr_t handler, restorer;
3451 if (act) {
3452 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3453 __get_user(handler, &act->sa_handler) ||
3454 __get_user(restorer, &act->sa_restorer) ||
3455 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3456 __get_user(mask, &act->sa_mask))
3457 return -EFAULT;
3459 #ifdef __ARCH_HAS_KA_RESTORER
3460 new_ka.ka_restorer = NULL;
3462 new_ka.sa.sa_handler = compat_ptr(handler);
3463 new_ka.sa.sa_restorer = compat_ptr(restorer);
3464 siginitset(&new_ka.sa.sa_mask, mask);
3467 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3469 if (!ret && oact) {
3470 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3471 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3472 &oact->sa_handler) ||
3473 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3474 &oact->sa_restorer) ||
3475 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3476 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3477 return -EFAULT;
3480 return ret;
3483 #ifdef CONFIG_SGETMASK_SYSCALL
3486 * For backwards compatibility. Functionality superseded by sigprocmask.
3488 SYSCALL_DEFINE0(sgetmask)
3491 return current->blocked.sig[0];
3494 SYSCALL_DEFINE1(ssetmask, int, newmask)
3496 int old = current->blocked.sig[0];
3497 sigset_t newset;
3499 siginitset(&newset, newmask);
3500 set_current_blocked(&newset);
3502 return old;
3504 #endif /* CONFIG_SGETMASK_SYSCALL */
3506 #ifdef __ARCH_WANT_SYS_SIGNAL
3508 * For backwards compatibility. Functionality superseded by sigaction.
3510 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3512 struct k_sigaction new_sa, old_sa;
3513 int ret;
3515 new_sa.sa.sa_handler = handler;
3516 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3517 sigemptyset(&new_sa.sa.sa_mask);
3519 ret = do_sigaction(sig, &new_sa, &old_sa);
3521 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3523 #endif /* __ARCH_WANT_SYS_SIGNAL */
3525 #ifdef __ARCH_WANT_SYS_PAUSE
3527 SYSCALL_DEFINE0(pause)
3529 while (!signal_pending(current)) {
3530 __set_current_state(TASK_INTERRUPTIBLE);
3531 schedule();
3533 return -ERESTARTNOHAND;
3538 static int sigsuspend(sigset_t *set)
3540 current->saved_sigmask = current->blocked;
3541 set_current_blocked(set);
3543 while (!signal_pending(current)) {
3544 __set_current_state(TASK_INTERRUPTIBLE);
3545 schedule();
3547 set_restore_sigmask();
3548 return -ERESTARTNOHAND;
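/*
 * Illustrative userspace sketch of the classic race-free wait that
 * sigsuspend() enables (flag and signal are assumptions): test the
 * flag with the signal blocked, then atomically unblock and sleep.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void handler(int sig) { got_usr1 = 1; }
 *
 *	int main(void)
 *	{
 *		sigset_t block, old;
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = handler;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomic unblock + wait
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */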
3552 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3553 * value until a signal is received
3554 * @unewset: new signal mask value
3555 * @sigsetsize: size of sigset_t type
3557 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3559 sigset_t newset;
3561 /* XXX: Don't preclude handling different sized sigset_t's. */
3562 if (sigsetsize != sizeof(sigset_t))
3563 return -EINVAL;
3565 if (copy_from_user(&newset, unewset, sizeof(newset)))
3566 return -EFAULT;
3567 return sigsuspend(&newset);
3570 #ifdef CONFIG_COMPAT
3571 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3573 #ifdef __BIG_ENDIAN
3574 sigset_t newset;
3575 compat_sigset_t newset32;
3577 /* XXX: Don't preclude handling different sized sigset_t's. */
3578 if (sigsetsize != sizeof(sigset_t))
3579 return -EINVAL;
3581 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3582 return -EFAULT;
3583 sigset_from_compat(&newset, &newset32);
3584 return sigsuspend(&newset);
3585 #else
3586 /* on little-endian bitmaps don't care about granularity */
3587 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3588 #endif
3592 #ifdef CONFIG_OLD_SIGSUSPEND
3593 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3595 sigset_t blocked;
3596 siginitset(&blocked, mask);
3597 return sigsuspend(&blocked);
3600 #ifdef CONFIG_OLD_SIGSUSPEND3
3601 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3603 sigset_t blocked;
3604 siginitset(&blocked, mask);
3605 return sigsuspend(&blocked);
3609 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3611 return NULL;
3614 void __init signals_init(void)
3616 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3617 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3618 != offsetof(struct siginfo, _sifields._pad));
3620 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3623 #ifdef CONFIG_KGDB_KDB
3624 #include <linux/kdb.h>
3626 * kdb_send_sig_info - Allows kdb to send signals without exposing
3627 * signal internals. This function checks if the required locks are
3628 * available before calling the main signal code, to avoid kdb
3629 * deadlocks.
3631 void
3632 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3634 static struct task_struct *kdb_prev_t;
3635 int sig, new_t;
3636 if (!spin_trylock(&t->sighand->siglock)) {
3637 kdb_printf("Can't do kill command now.\n"
3638 "The sigmask lock is held somewhere else in "
3639 "kernel, try again later\n");
3642 spin_unlock(&t->sighand->siglock);
3643 new_t = kdb_prev_t != t;
3644 kdb_prev_t = t;
3645 if (t->state != TASK_RUNNING && new_t) {
3646 kdb_printf("Process is not RUNNING, sending a signal from "
3647 "kdb risks deadlock\n"
3648 "on the run queue locks. "
3649 "The signal has _not_ been sent.\n"
3650 "Reissue the kill command if you want to risk "
3654 sig = info->si_signo;
3655 if (send_sig_info(sig, info, t))
3656 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3659 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3661 #endif /* CONFIG_KGDB_KDB */