/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig,
			    int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
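
/*
 * Illustrative usage sketch (not from the original file): callers
 * typically change current->blocked under ->siglock and then let
 * recalc_sigpending() refresh TIF_SIGPENDING:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;	(newset is a hypothetical mask)
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * set_current_blocked() further below wraps exactly this kind of sequence.
 */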
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
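
/*
 * Worked example (illustrative): with both SIGUSR1 and SIGSEGV pending
 * and unblocked, the first-word scan above narrows x to SYNCHRONOUS_MASK,
 * so next_signal() returns SIGSEGV first even though SIGUSR1 has the
 * lower signal number; synchronous fault signals always win the tie.
 */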
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		__wake_up_sync_key(&task->parent->signal->wait_chldexit,
				   TASK_UNINTERRUPTIBLE, 1, task);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
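
/*
 * Usage sketch (illustrative, hypothetical names): a driver would pair
 * the two calls above around a region that must not see signals:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		return 0;		(0 = keep the signal blocked)
 *	}
 *	...
 *	block_all_signals(my_notifier, &my_data, &my_mask);
 *	...critical region...
 *	unblock_all_signals();
 *
 * my_notifier, my_data and my_mask are stand-ins, not names from this file.
 */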
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
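
/*
 * Usage sketch (illustrative): a kernel thread that handles its own
 * signals dequeues with the siglock held, roughly:
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		...act on signr...
 */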
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->euid == tcred->suid ||
	     cred->euid == tcred->uid ||
	     cred->uid  == tcred->suid ||
	     cred->uid  == tcred->uid))
		return 1;

	if (ns_capable(tcred->user->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			wake_up_state(t, __TASK_STOPPED);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
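
/*
 * Illustrative consequence of legacy_queue(): two SIGUSR1s sent before
 * the target runs coalesce into a single pending instance (the second
 * send is short-circuited in __send_signal() below), while two SIGRTMIN
 * sends are both queued. This is the classic non-rt vs. rt difference.
 */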
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
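
/*
 * Example (illustrative): booting with "print-fatal-signals=1" enables
 * the diagnostics in print_fatal_signal() and print_dropped_signal();
 * the same knob is also exposed at runtime as the
 * kernel.print-fatal-signals sysctl.
 */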
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
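
/*
 * Usage sketch (illustrative): kernel code holding a reference to a task
 * (a hypothetical "tsk") can send a kernel-origin SIGTERM with:
 *
 *	send_sig(SIGTERM, tsk, 1);	(priv != 0 selects SEND_SIG_PRIV)
 */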
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
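
/*
 * Usage sketch (illustrative): to signal by numeric pid from kernel code,
 * resolve a struct pid first; "nr" below is hypothetical:
 *
 *	struct pid *pid = find_get_pid(nr);
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);
 *		put_pid(pid);
 *	}
 */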
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
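
/*
 * Lifecycle sketch (illustrative, modelled on the posix-timers usage
 * described above):
 *
 *	struct sigqueue *q = sigqueue_alloc();	(at timer_create() time)
 *	if (!q)
 *		return -EAGAIN;			(failure reported up front)
 *	...
 *	send_sigqueue(q, tsk, group);		(per expiry; cannot fail
 *						 for lack of memory)
 *	...
 *	sigqueue_free(q);			(at timer deletion)
 */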
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is to switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * Test whether the target task of the usual cldstop notification - the
 * real_parent of @child - is in the same group as the ptracer.
 */
static bool real_parent_is_ptracer(struct task_struct *child)
{
	return same_thread_group(child->parent, child->real_parent);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * while siglock was released for the arch hook, PENDING could be
	 * clear now.  We act as if SIGCONT is received after TASK_TRACED
	 * is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && !real_parent_is_ptracer(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns non-zero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;
		else
			WARN_ON_ONCE(!task_ptrace(current));

		current->jobctl &= ~JOBCTL_STOP_SIGMASK;
		current->jobctl |= signr | gstop;
		sig->group_stop_count = 1;
		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			t->jobctl &= ~JOBCTL_STOP_SIGMASK;
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
				t->jobctl |= signr | gstop;
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
		}
	}
retry:
	if (likely(!task_ptrace(current))) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		schedule();

		spin_lock_irq(&current->sighand->siglock);
	} else {
		ptrace_stop(current->jobctl & JOBCTL_STOP_SIGMASK,
			    CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}

	/*
	 * JOBCTL_STOP_PENDING could be set if another group stop has
	 * started since being woken up or ptrace wants us to transit
	 * between TASK_STOPPED and TRACED.  Retry group stop.
	 */
	if (current->jobctl & JOBCTL_STOP_PENDING) {
		WARN_ON_ONCE(!(current->jobctl & JOBCTL_STOP_SIGMASK));
		goto retry;
	}

	/* PTRACE_ATTACH might have raced with task killing, clear trapping */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);

	tracehook_finish_jctl();

	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		struct task_struct *leader;
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);

		do_notify_parent_cldstop(current, false, why);

		leader = current->group_leader;
		if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
			do_notify_parent_cldstop(leader, true, why);

		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
			    do_signal_stop(0))
				goto relock;

			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	set_current_blocked(&newset);
	return 0;
}
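/*
 * Example (an illustrative sketch, not from this file) of how a kernel
 * thread might use the helper above to block everything except SIGHUP;
 * siginitsetinv() and allow_signal() are assumed available as in
 * contemporary kernels:
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGHUP));
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);	// may block even SIGKILL
 *	allow_signal(SIGHUP);
 */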
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of signals to block (if non-null)
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
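/*
 * Example (a minimal userspace sketch, not from this file) of the
 * semantics implemented above, assuming the glibc sigprocmask(3) wrapper,
 * which reaches rt_sigprocmask on Linux:
 *
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT held pending
 *		// ... critical section ...
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore old mask
 *		return 0;
 *	}
 *
 * Attempts to block SIGKILL/SIGSTOP are silently dropped by the
 * sigdelsetmask() call above rather than reported as an error.
 */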
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @set: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
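/*
 * Example (an illustrative userspace sketch, not from this file):
 * sigpending(2) reports signals that are both pending and blocked,
 * exactly the intersection computed in do_sigpending() above:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *		raise(SIGTERM);			// queued, not delivered
 *		sigpending(&pending);
 *		if (sigismember(&pending, SIGTERM))
 *			printf("SIGTERM is pending\n");
 *		return 0;
 *	}
 */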
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
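/*
 * Example (an illustrative userspace sketch, not from this file) of
 * consuming the fields copied above: the generic si_signo/si_errno/si_code
 * triple is always present, the union members depend on si_code. printf()
 * is used for brevity only and is not async-signal-safe:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// For SI_USER the union carries the sender's pid/uid.
 *		printf("sig=%d code=%d pid=%d uid=%d\n", si->si_signo,
 *		       si->si_code, (int)si->si_pid, (int)si->si_uid);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = handler,
 *					.sa_flags = SA_SIGINFO };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *		kill(getpid(), SIGUSR1);
 *		return 0;
 *	}
 */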
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready: temporarily unblock the signals we're
		 * interested in while we sleep, so that we'll be awakened
		 * when they arrive. Unblocking is always fine, we can
		 * avoid set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		siginitset(&tsk->real_blocked, 0);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
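/*
 * Example (a minimal userspace sketch, not from this file) of the wait
 * path above via the glibc sigtimedwait(3) wrapper; the signal must be
 * blocked first or it may be delivered to a handler instead of the waiter:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *		int sig;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		sig = sigtimedwait(&set, &info, &ts);
 *		if (sig < 0)
 *			perror("sigtimedwait");	// EAGAIN on timeout
 *		else
 *			printf("signal %d from pid %d\n", sig, (int)info.si_pid);
 *		return 0;
 *	}
 */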
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
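/*
 * Example (an illustrative userspace sketch, not from this file) of the
 * "null signal" probe described above; probe_pid() is a hypothetical
 * helper name:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *
 *	// 1: pid exists and we may signal it; 0: gone;
 *	// -1: exists but we lack permission (EPERM).
 *	static int probe_pid(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return 1;
 *		return errno == ESRCH ? 0 : -1;
 *	}
 */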
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
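/*
 * Example (an illustrative sketch, not from this file). Older C libraries
 * provide no tgkill wrapper, so it is reached via syscall(2);
 * pthread_kill(3) is the usual portable route:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Both IDs are checked together, so a recycled TID that now
 *	// belongs to another process yields -ESRCH.
 *	static int my_tgkill(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */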
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
/**
 *  sys_rt_sigqueueinfo - queue a signal and its accompanying info
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info.si_code < 0);
		return -EPERM;
	}
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
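/*
 * Example (a minimal userspace sketch, not from this file). The glibc
 * sigqueue(3) wrapper builds a siginfo with the negative SI_QUEUE si_code
 * and the caller's pid/uid, so it passes the check above; a forged
 * non-negative si_code would get -EPERM:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		union sigval v = { .sival_int = 42 };
 *
 *		sigqueue(getpid(), SIGUSR1, v);	// payload readable via
 *		return 0;			// si_value in SA_SIGINFO
 *	}
 */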
long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
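/*
 * Example (an illustrative userspace sketch, not from this file) of
 * installing an action through this path; note the POSIX rule quoted
 * above means setting SIG_IGN also discards an already-pending signal:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t got_int;
 *
 *	static void on_int(int sig) { got_int = 1; }
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_handler = on_int,
 *					.sa_flags = SA_RESTART };
 *
 *		sigemptyset(&sa.sa_mask);	// SIGKILL/SIGSTOP stripped anyway
 *		sigaction(SIGINT, &sa, NULL);
 *		while (!got_int)
 *			pause();
 *		return 0;
 *	}
 */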
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
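/*
 * Example (an illustrative userspace sketch, not from this file) of the
 * sigaltstack(2) interface validated above, paired with SA_ONSTACK so a
 * stack-overflow SIGSEGV can still run its handler:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig) { _exit(1); }
 *
 *	int main(void)
 *	{
 *		stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *			       .ss_size = SIGSTKSZ,	// >= MINSIGSTKSZ
 *			       .ss_flags = 0 };		// 0 accepted, see above
 *		struct sigaction sa = { .sa_handler = on_segv,
 *					.sa_flags = SA_ONSTACK };
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		return 0;
 *	}
 */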
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
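/*
 * The SA_ONESHOT|SA_NOMASK pair above gives System V semantics: the
 * disposition is reset to SIG_DFL before the handler runs, and the signal
 * is not blocked during the handler. An illustrative sketch (not from this
 * file); note glibc's signal(3) normally maps to sigaction with BSD
 * semantics rather than this syscall:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void once(int sig)
 *	{
 *		puts("first SIGINT only");	// a second SIGINT is fatal
 *	}
 *
 *	int main(void)
 *	{
 *		signal(SIGINT, once);
 *		for (;;)
 *			pause();
 *	}
 */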
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	and suspend until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
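/*
 * Example (an illustrative userspace sketch, not from this file) of the
 * classic race-free wait this syscall enables: block the signal, test the
 * flag, then atomically restore the old mask and sleep:
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t done;
 *
 *	static void on_usr1(int sig) { done = 1; }
 *
 *	int main(void)
 *	{
 *		sigset_t block, old;
 *		struct sigaction sa = { .sa_handler = on_usr1 };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!done)
 *			sigsuspend(&old);	// no lost-wakeup window
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */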
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */