/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
			    int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
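
/*
 * Editor's illustration (not part of the original file): for a one-word
 * sigset the test above reduces to a single AND-NOT, e.g.
 *
 *	unsigned long pending = sigmask(SIGTERM) | sigmask(SIGINT);
 *	unsigned long blocked = sigmask(SIGINT);
 *	unsigned long ready   = pending & ~blocked;	-- only SIGTERM left
 *
 * so has_pending_signals() returns non-zero iff some pending signal is
 * not blocked.
 */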
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers that know they
	 * should clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
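
/*
 * Editor's example (assumption, not in the original source): if both
 * SIGUSR1 and SIGSEGV are pending in the first word and neither is
 * blocked, the SYNCHRONOUS_MASK narrowing above makes next_signal()
 * report SIGSEGV first; SIGUSR1 is only returned once no synchronous
 * signal remains pending.
 */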
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
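
/*
 * Editor's sketch of the ratelimit pattern used above (hypothetical
 * caller, not from this file):
 *
 *	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);
 *
 *	if (__ratelimit(&rs))
 *		printk(KERN_INFO "at most 10 messages every 5 seconds\n");
 */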
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op and returns %false.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
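
/*
 * Editor's usage sketch (mirrors do_signal_stop() further down): an
 * initiator folds the stop signo into the low JOBCTL_STOP_SIGMASK bits
 * when scheduling a group stop:
 *
 *	if (task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING |
 *					JOBCTL_STOP_CONSUME))
 *		sig->group_stop_count++;
 */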
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
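
/*
 * Editorial note (sketch, not from the original file): the two helpers
 * above must always balance the per-user pending count, e.g.
 *
 *	struct sigqueue *q = __sigqueue_alloc(sig, t, GFP_ATOMIC, 0);
 *	...	queue, deliver, dequeue ...
 *	__sigqueue_free(q);	-- rebalances user->sigpending
 */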
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon. If the notifier routine returns non-zero, then the
 * signal will be acted upon after all. If the notifier routine returns 0,
 * then the signal will be blocked. Only one block per process is
 * allowed. priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
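
/*
 * Editor's usage sketch (hypothetical driver, not from this file):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;	-- hypothetical type
 *		return dev->wants_signals;	-- non-zero: deliver anyway
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	...
 *	unblock_all_signals();
 */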
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
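
/*
 * Editorial note: SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED
 * are tiny magic pointer values rather than real siginfo structures,
 * which is what makes the "info <= SEND_SIG_FORCED" comparison above
 * work. A kernel-internal sender can therefore do, e.g.:
 *
 *	send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
 *
 * without allocating any siginfo.
 */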
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->euid == tcred->suid ||
	     cred->euid == tcred->uid ||
	     cred->uid  == tcred->suid ||
	     cred->uid  == tcred->uid))
		return 1;

	if (ns_capable(tcred->user->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			wake_up_state(t, __TASK_STOPPED);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
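
/*
 * Editor's illustration of the legacy_queue() rule (assumption, not in
 * the original file): two sends of the same pending legacy signal
 * coalesce, while real-time signals queue once per send:
 *
 *	kill(pid, SIGUSR1);		-- queued
 *	kill(pid, SIGUSR1);		-- coalesced while still pending
 *	sigqueue(pid, SIGRTMIN, val);	-- always queued separately
 */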
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort. We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information. We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
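
/*
 * Editor's example (sketch): kernel-internal senders usually pass
 * priv=1 so the signal is attributed to the kernel (SI_KERNEL) rather
 * than to a user:
 *
 *	send_sig(SIGHUP, task, 1);	-- expands to SEND_SIG_PRIV
 */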
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
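
/*
 * Editor's usage sketch (mirroring the comment above; the surrounding
 * timer code is assumed, not shown in this file):
 *
 *	q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;		-- reported at timer_create() time
 *	...
 *	send_sigqueue(q, tsk, group);	-- at expiry, no allocation needed
 */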
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
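
/*
 * Editor's illustrative timeline for the SI_TIMER branch above
 * (assumption): a second expiry that finds the entry still queued only
 * bumps the overrun counter instead of queuing again:
 *
 *	send_sigqueue(q, t, 0);		-- queued, si_overrun = 0
 *	send_sigqueue(q, t, 0);		-- still queued: si_overrun++
 */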
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we will always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * Test whether the target task of the usual cldstop notification - the
 * real_parent of @child - is in the same group as the ptracer.
 */
static bool real_parent_is_ptracer(struct task_struct *child)
{
	return same_thread_group(child->parent, child->real_parent);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && !real_parent_is_ptracer(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run. */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;
		else
			WARN_ON_ONCE(!task_ptrace(current));

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
		}
	}

	if (likely(!task_ptrace(current))) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * It is currently used only to trap for group stop while ptraced.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	WARN_ON_ONCE(!signr);
	ptrace_stop(signr, CLD_STOPPED, 0, NULL);
	current->exit_code = 0;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run. */
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		struct task_struct *leader;
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		leader = current->group_leader;
		if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
			do_notify_parent_cldstop(leader, true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, info);

		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info,
					      regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification. This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2331 EXPORT_SYMBOL(recalc_sigpending);
2332 EXPORT_SYMBOL_GPL(dequeue_signal);
2333 EXPORT_SYMBOL(flush_signals);
2334 EXPORT_SYMBOL(force_sig);
2335 EXPORT_SYMBOL(send_sig);
2336 EXPORT_SYMBOL(send_sig_info);
2337 EXPORT_SYMBOL(sigprocmask);
2338 EXPORT_SYMBOL(block_all_signals);
2339 EXPORT_SYMBOL(unblock_all_signals);
2343 * System call entry points.
2347 * sys_restart_syscall - restart a system call
2349 SYSCALL_DEFINE0(restart_syscall)
2351 struct restart_block *restart = ¤t_thread_info()->restart_block;
2352 return restart->fn(restart);
2355 long do_no_restart_syscall(struct restart_block *param)
2360 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2362 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2363 sigset_t newblocked;
2364 /* A set of now blocked but previously unblocked signals. */
2365 sigandnsets(&newblocked, newset, ¤t->blocked);
2366 retarget_shared_pending(tsk, &newblocked);
2368 tsk->blocked = *newset;
2369 recalc_sigpending();
2373 * set_current_blocked - change current->blocked mask
2376 * It is wrong to change ->blocked directly, this helper should be used
2377 * to ensure the process can't miss a shared signal we are going to block.
2379 void set_current_blocked(const sigset_t *newset)
2381 struct task_struct *tsk = current;
2383 spin_lock_irq(&tsk->sighand->siglock);
2384 __set_task_blocked(tsk, newset);
2385 spin_unlock_irq(&tsk->sighand->siglock);
2389 * This is also useful for kernel threads that want to temporarily
2390 * (or permanently) block certain signals.
2392 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2393 * interface happily blocks "unblockable" signals like SIGKILL
2396 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2398 struct task_struct *tsk = current;
2401 /* Lockless, only current can change ->blocked, never from irq */
2403 *oldset = tsk->blocked;
2407 sigorsets(&newset, &tsk->blocked, set);
2410 sigandnsets(&newset, &tsk->blocked, set);
2419 set_current_blocked(&newset);
2424 * sys_rt_sigprocmask - change the list of currently blocked signals
2425 * @how: whether to add, remove, or set signals
2426 * @set: stores pending signals
2427 * @oset: previous value of signal mask if non-null
2428 * @sigsetsize: size of sigset_t type
2430 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2431 sigset_t __user *, oset, size_t, sigsetsize)
2433 sigset_t old_set, new_set;
2436 /* XXX: Don't preclude handling different sized sigset_t's. */
2437 if (sigsetsize != sizeof(sigset_t))
2440 old_set = current->blocked;
2443 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2445 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2447 error = sigprocmask(how, &new_set, NULL);
2453 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2460 long do_sigpending(void __user *set, unsigned long sigsetsize)
2462 long error = -EINVAL;
2465 if (sigsetsize > sizeof(sigset_t))
2468 spin_lock_irq(¤t->sighand->siglock);
2469 sigorsets(&pending, ¤t->pending.signal,
2470 ¤t->signal->shared_pending.signal);
2471 spin_unlock_irq(¤t->sighand->siglock);
2473 /* Outside the lock because only this thread touches it. */
2474 sigandsets(&pending, ¤t->blocked, &pending);
2477 if (!copy_to_user(set, &pending, sigsetsize))
2485 * sys_rt_sigpending - examine a pending signal that has been raised
2487 * @set: stores pending signals
2488 * @sigsetsize: size of sigset_t type or larger
2490 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2492 return do_sigpending(set, sigsetsize);
2495 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2497 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2501 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2503 if (from->si_code < 0)
2504 return __copy_to_user(to, from, sizeof(siginfo_t))
2507 * If you change siginfo_t structure, please be sure
2508 * this code is fixed accordingly.
2509 * Please remember to update the signalfd_copyinfo() function
2510 * inside fs/signalfd.c too, in case siginfo_t changes.
2511 * It should never copy any pad contained in the structure
2512 * to avoid security leaks, but must copy the generic
2513 * 3 ints plus the relevant union member.
2515 err = __put_user(from->si_signo, &to->si_signo);
2516 err |= __put_user(from->si_errno, &to->si_errno);
2517 err |= __put_user((short)from->si_code, &to->si_code);
2518 switch (from->si_code & __SI_MASK) {
2520 err |= __put_user(from->si_pid, &to->si_pid);
2521 err |= __put_user(from->si_uid, &to->si_uid);
2524 err |= __put_user(from->si_tid, &to->si_tid);
2525 err |= __put_user(from->si_overrun, &to->si_overrun);
2526 err |= __put_user(from->si_ptr, &to->si_ptr);
2529 err |= __put_user(from->si_band, &to->si_band);
2530 err |= __put_user(from->si_fd, &to->si_fd);
2533 err |= __put_user(from->si_addr, &to->si_addr);
2534 #ifdef __ARCH_SI_TRAPNO
2535 err |= __put_user(from->si_trapno, &to->si_trapno);
2537 #ifdef BUS_MCEERR_AO
2539 * Other callers might not initialize the si_lsb field,
2540 * so check explicitly for the right codes here.
2542 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2543 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2547 err |= __put_user(from->si_pid, &to->si_pid);
2548 err |= __put_user(from->si_uid, &to->si_uid);
2549 err |= __put_user(from->si_status, &to->si_status);
2550 err |= __put_user(from->si_utime, &to->si_utime);
2551 err |= __put_user(from->si_stime, &to->si_stime);
2553 case __SI_RT: /* This is not generated by the kernel as of now. */
2554 case __SI_MESGQ: /* But this is */
2555 err |= __put_user(from->si_pid, &to->si_pid);
2556 err |= __put_user(from->si_uid, &to->si_uid);
2557 err |= __put_user(from->si_ptr, &to->si_ptr);
2559 default: /* this is just in case for now ... */
2560 err |= __put_user(from->si_pid, &to->si_pid);
2561 err |= __put_user(from->si_uid, &to->si_uid);
2570 * do_sigtimedwait - wait for queued signals specified in @which
2571 * @which: queued signals to wait for
2572 * @info: if non-null, the signal's siginfo is returned here
2573 * @ts: upper bound on process time suspension
2575 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2576 const struct timespec *ts)
2578 struct task_struct *tsk = current;
2579 long timeout = MAX_SCHEDULE_TIMEOUT;
2580 sigset_t mask = *which;
2584 if (!timespec_valid(ts))
2586 timeout = timespec_to_jiffies(ts);
2588 * We can be close to the next tick, add another one
2589 * to ensure we will wait at least the time asked for.
2591 if (ts->tv_sec || ts->tv_nsec)
2596 * Invert the set of allowed signals to get those we want to block.
2598 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2601 spin_lock_irq(&tsk->sighand->siglock);
2602 sig = dequeue_signal(tsk, &mask, info);
2603 if (!sig && timeout) {
2605 * None ready, temporarily unblock those we're interested
2606 * while we are sleeping in so that we'll be awakened when
2607 * they arrive. Unblocking is always fine, we can avoid
2608 * set_current_blocked().
2610 tsk->real_blocked = tsk->blocked;
2611 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2612 recalc_sigpending();
2613 spin_unlock_irq(&tsk->sighand->siglock);
2615 timeout = schedule_timeout_interruptible(timeout);
2617 spin_lock_irq(&tsk->sighand->siglock);
2618 __set_task_blocked(tsk, &tsk->real_blocked);
2619 siginitset(&tsk->real_blocked, 0);
2620 sig = dequeue_signal(tsk, &mask, info);
2622 spin_unlock_irq(&tsk->sighand->siglock);
2626 return timeout ? -EINTR : -EAGAIN;
2630 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2632 * @uthese: queued signals to wait for
2633 * @uinfo: if non-null, the signal's siginfo is returned here
2634 * @uts: upper bound on process time suspension
2635 * @sigsetsize: size of sigset_t type
2637 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2638 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2646 /* XXX: Don't preclude handling different sized sigset_t's. */
2647 if (sigsetsize != sizeof(sigset_t))
2650 if (copy_from_user(&these, uthese, sizeof(these)))
2654 if (copy_from_user(&ts, uts, sizeof(ts)))
2658 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2660 if (ret > 0 && uinfo) {
2661 if (copy_siginfo_to_user(uinfo, &info))
2669 * sys_kill - send a signal to a process
2670 * @pid: the PID of the process
2671 * @sig: signal to be sent
2673 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2675 struct siginfo info;
2677 info.si_signo = sig;
2679 info.si_code = SI_USER;
2680 info.si_pid = task_tgid_vnr(current);
2681 info.si_uid = current_uid();
2683 return kill_something_info(sig, &info, pid);
2687 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2689 struct task_struct *p;
2693 p = find_task_by_vpid(pid);
2694 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2695 error = check_kill_permission(sig, info, p);
2697 * The null signal is a permissions and process existence
2698 * probe. No signal is actually delivered.
2700 if (!error && sig) {
2701 error = do_send_sig_info(sig, info, p, false);
2703 * If lock_task_sighand() failed we pretend the task
2704 * dies after receiving the signal. The window is tiny,
2705 * and the signal is private anyway.
2707 if (unlikely(error == -ESRCH))
2716 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2718 struct siginfo info;
2720 info.si_signo = sig;
2722 info.si_code = SI_TKILL;
2723 info.si_pid = task_tgid_vnr(current);
2724 info.si_uid = current_uid();
2726 return do_send_specific(tgid, pid, sig, &info);
2730 * sys_tgkill - send signal to one specific thread
2731 * @tgid: the thread group ID of the thread
2732 * @pid: the PID of the thread
2733 * @sig: signal to be sent
2735 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2736 * exists but it's not belonging to the target process anymore. This
2737 * method solves the problem of threads exiting and PIDs getting reused.
2739 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2741 /* This is only valid for single tasks */
2742 if (pid <= 0 || tgid <= 0)
2745 return do_tkill(tgid, pid, sig);
2749 * sys_tkill - send signal to one specific task
2750 * @pid: the PID of the task
2751 * @sig: signal to be sent
2753 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2755 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2757 /* This is only valid for single tasks */
2761 return do_tkill(0, pid, sig);
2765 * sys_rt_sigqueueinfo - send signal information to a signal
2766 * @pid: the PID of the thread
2767 * @sig: signal to be sent
2768 * @uinfo: signal info to be sent
2770 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2771 siginfo_t __user *, uinfo)
2775 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2778 /* Not even root can pretend to send signals from the kernel.
2779 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2781 if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2782 /* We used to allow any < 0 si_code */
2783 WARN_ON_ONCE(info.si_code < 0);
2786 info.si_signo = sig;
2788 /* POSIX.1b doesn't mention process groups. */
2789 return kill_proc_info(sig, &info, pid);
2792 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2794 /* This is only valid for single tasks */
2795 if (pid <= 0 || tgid <= 0)
2798 /* Not even root can pretend to send signals from the kernel.
2799 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2801 if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2802 /* We used to allow any < 0 si_code */
2803 WARN_ON_ONCE(info->si_code < 0);
2806 info->si_signo = sig;
2808 return do_send_specific(tgid, pid, sig, info);
2811 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2812 siginfo_t __user *, uinfo)
2816 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2819 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2822 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2824 struct task_struct *t = current;
2825 struct k_sigaction *k;
2828 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2831 k = &t->sighand->action[sig-1];
2833 spin_lock_irq(¤t->sighand->siglock);
2838 sigdelsetmask(&act->sa.sa_mask,
2839 sigmask(SIGKILL) | sigmask(SIGSTOP));
2843 * "Setting a signal action to SIG_IGN for a signal that is
2844 * pending shall cause the pending signal to be discarded,
2845 * whether or not it is blocked."
2847 * "Setting a signal action to SIG_DFL for a signal that is
2848 * pending and whose default action is to ignore the signal
2849 * (for example, SIGCHLD), shall cause the pending signal to
2850 * be discarded, whether or not it is blocked"
2852 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2854 sigaddset(&mask, sig);
2855 rm_from_queue_full(&mask, &t->signal->shared_pending);
2857 rm_from_queue_full(&mask, &t->pending);
2859 } while (t != current);
2863 spin_unlock_irq(¤t->sighand->siglock);
2868 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2873 oss.ss_sp = (void __user *) current->sas_ss_sp;
2874 oss.ss_size = current->sas_ss_size;
2875 oss.ss_flags = sas_ss_flags(sp);
2883 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2885 error = __get_user(ss_sp, &uss->ss_sp) |
2886 __get_user(ss_flags, &uss->ss_flags) |
2887 __get_user(ss_size, &uss->ss_size);
2892 if (on_sig_stack(sp))
2897 * Note - this code used to test ss_flags incorrectly:
2898 * old code may have been written using ss_flags==0
2899 * to mean ss_flags==SS_ONSTACK (as this was the only
2900 * way that worked) - this fix preserves that older
2903 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2906 if (ss_flags == SS_DISABLE) {
2911 if (ss_size < MINSIGSTKSZ)
2915 current->sas_ss_sp = (unsigned long) ss_sp;
2916 current->sas_ss_size = ss_size;
2922 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2924 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2925 __put_user(oss.ss_size, &uoss->ss_size) |
2926 __put_user(oss.ss_flags, &uoss->ss_flags);
2933 #ifdef __ARCH_WANT_SYS_SIGPENDING
2936 * sys_sigpending - examine pending signals
2937 * @set: where mask of pending signal is returned
2939 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2941 return do_sigpending(set, sizeof(*set));
2946 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2948 * sys_sigprocmask - examine and change blocked signals
2949 * @how: whether to add, remove, or set signals
2950 * @nset: signals to add or remove (if non-null)
2951 * @oset: previous value of signal mask if non-null
2953 * Some platforms have their own version with special arguments;
2954 * others support only sys_rt_sigprocmask.
2957 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
2958 old_sigset_t __user *, oset)
2960 old_sigset_t old_set, new_set;
2961 sigset_t new_blocked;
2963 old_set = current->blocked.sig[0];
2966 if (copy_from_user(&new_set, nset, sizeof(*nset)))
2968 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2970 new_blocked = current->blocked;
2974 sigaddsetmask(&new_blocked, new_set);
2977 sigdelsetmask(&new_blocked, new_set);
2980 new_blocked.sig[0] = new_set;
2986 set_current_blocked(&new_blocked);
2990 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2996 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2998 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
3000 * sys_rt_sigaction - alter an action taken by a process
3001 * @sig: signal to be sent
3002 * @act: new sigaction
3003 * @oact: used to save the previous sigaction
3004 * @sigsetsize: size of sigset_t type
3006 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3007 const struct sigaction __user *, act,
3008 struct sigaction __user *, oact,
3011 struct k_sigaction new_sa, old_sa;
3014 /* XXX: Don't preclude handling different sized sigset_t's. */
3015 if (sigsetsize != sizeof(sigset_t))
3019 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3023 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3026 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3032 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
3034 #ifdef __ARCH_WANT_SYS_SGETMASK
3037 * For backwards compatibility. Functionality superseded by sigprocmask.
3039 SYSCALL_DEFINE0(sgetmask)
3042 return current->blocked.sig[0];
3045 SYSCALL_DEFINE1(ssetmask, int, newmask)
3049 spin_lock_irq(¤t->sighand->siglock);
3050 old = current->blocked.sig[0];
3052 siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)|
3054 recalc_sigpending();
3055 spin_unlock_irq(¤t->sighand->siglock);
3059 #endif /* __ARCH_WANT_SGETMASK */
3061 #ifdef __ARCH_WANT_SYS_SIGNAL
3063 * For backwards compatibility. Functionality superseded by sigaction.
3065 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3067 struct k_sigaction new_sa, old_sa;
3070 new_sa.sa.sa_handler = handler;
3071 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3072 sigemptyset(&new_sa.sa.sa_mask);
3074 ret = do_sigaction(sig, &new_sa, &old_sa);
3076 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3078 #endif /* __ARCH_WANT_SYS_SIGNAL */
3080 #ifdef __ARCH_WANT_SYS_PAUSE
3082 SYSCALL_DEFINE0(pause)
3084 while (!signal_pending(current)) {
3085 current->state = TASK_INTERRUPTIBLE;
3088 return -ERESTARTNOHAND;
3093 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3095 * sys_rt_sigsuspend - replace the signal mask for a value with the
3096 * @unewset value until a signal is received
3097 * @unewset: new signal mask value
3098 * @sigsetsize: size of sigset_t type
3100 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3104 /* XXX: Don't preclude handling different sized sigset_t's. */
3105 if (sigsetsize != sizeof(sigset_t))
3108 if (copy_from_user(&newset, unewset, sizeof(newset)))
3110 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3112 spin_lock_irq(¤t->sighand->siglock);
3113 current->saved_sigmask = current->blocked;
3114 current->blocked = newset;
3115 recalc_sigpending();
3116 spin_unlock_irq(¤t->sighand->siglock);
3118 current->state = TASK_INTERRUPTIBLE;
3120 set_restore_sigmask();
3121 return -ERESTARTNOHAND;
3123 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
3125 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3130 void __init signals_init(void)
3132 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3135 #ifdef CONFIG_KGDB_KDB
3136 #include <linux/kdb.h>
3138 * kdb_send_sig_info - Allows kdb to send signals without exposing
3139 * signal internals. This function checks if the required locks are
3140 * available before calling the main signal code, to avoid kdb
3144 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3146 static struct task_struct *kdb_prev_t;
3148 if (!spin_trylock(&t->sighand->siglock)) {
3149 kdb_printf("Can't do kill command now.\n"
3150 "The sigmask lock is held somewhere else in "
3151 "kernel, try again later\n");
3154 spin_unlock(&t->sighand->siglock);
3155 new_t = kdb_prev_t != t;
3157 if (t->state != TASK_RUNNING && new_t) {
3158 kdb_printf("Process is not RUNNING, sending a signal from "
3159 "kdb risks deadlock\n"
3160 "on the run queue locks. "
3161 "The signal has _not_ been sent.\n"
3162 "Reissue the kill command if you want to risk "
3166 sig = info->si_signo;
3167 if (send_sig_info(sig, info, t))
3168 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3171 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3173 #endif /* CONFIG_KGDB_KDB */