/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int __sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/* Is it explicitly or implicitly ignored? */

	handler = t->sighand->action[sig - 1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	return __sig_ignored(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
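/*
 * Illustrative sketch (not part of the original source): on a platform
 * where _NSIG_WORDS == 1, the test above reduces to a single mask
 * operation per word:
 *
 *	unsigned long pending = ..., blocked = ...;
 *	int deliverable = (pending & ~blocked) != 0;
 *
 * A signal is deliverable only if its bit is set in the pending set and
 * clear in the blocked set; the switch merely unrolls this per word.
 */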
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * to clear it do so.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
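/*
 * Illustrative sketch (hypothetical driver, not from this file): a driver
 * that must defer signal handling around a critical window could pair the
 * two calls like this, where my_notifier(), my_data and my_mask are the
 * driver's own names:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_data *d = priv;
 *		return d->allow_signals;	// non-zero: act on the signal
 *	}
 *
 *	block_all_signals(my_notifier, &my_data, &my_mask);
 *	... critical window ...
 *	unblock_all_signals();
 */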
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return -EPERM;

	return security_task_kill(t, info, sig, 0);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		signal->flags &= ~SIGNAL_STOP_DEQUEUED;
	}
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
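/*
 * Illustrative consequence of legacy_queue() (userspace sketch, not
 * kernel code): sending the same non-realtime signal twice while it is
 * blocked coalesces into one delivery, whereas realtime signals queue:
 *
 *	sigprocmask(SIG_BLOCK, &mask_with_SIGUSR1, NULL);
 *	raise(SIGUSR1);
 *	raise(SIGUSR1);		// coalesced: the pending bit is already set
 *	sigprocmask(SIG_UNBLOCK, &mask_with_SIGUSR1, NULL);
 *	// the handler runs once; with SIGRTMIN it would run twice
 */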
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue *q;

	assert_spin_locked(&t->sighand->siglock);
	handle_stop_signal(sig, t);
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (sig_ignored(t, sig) || legacy_queue(signals, sig))
		return 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return 1;
}
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
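/*
 * Usage note (illustrative, not from the original file): the flag above
 * is set from the kernel command line, e.g.
 *
 *	print-fatal-signals=1
 *
 * after which every unexpected fatal signal is logged by
 * print_fatal_signal() before the task is killed.
 */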
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret;

	ret = send_signal(sig, info, t, &t->pending);
	if (ret <= 0)
		return ret;

	if (!sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (ret <= 0)
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
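/*
 * Illustrative lifecycle (a sketch of the POSIX-timer usage described
 * above; the call sites named in the comments are the usual ones, not
 * spelled out in this file):
 *
 *	q = sigqueue_alloc();		// at timer_create(): may fail, -> EAGAIN
 *	...
 *	send_sigqueue(sig, q, task);	// at each expiry: needs no allocation
 *	...
 *	sigqueue_free(q);		// at timer_delete(): dequeues if pending
 */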
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
		struct sigpending *pending)
{
	handle_stop_signal(sig, t);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */

		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		return 0;
	}

	if (sig_ignored(t, sig))
		return 1;

	signalfd_notify(t, sig);
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	return 0;
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = -1;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	if (!likely(lock_task_sighand(p, &flags)))
		goto out_err;

	ret = do_send_sigqueue(sig, q, p, &p->pending);

	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

	unlock_task_sighand(p, &flags);
out_err:
	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);

	ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);

	__group_complete_signal(sig, p);

	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);

	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, &current->blocked, info);
		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (is_global_init(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
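/*
 * Illustrative sketch (not from this file): a kernel thread that wants
 * to temporarily block everything, as the comment above permits, might do:
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_SETMASK, &all, &old);	// blocks even SIGKILL here
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */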
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;
	unsigned long flags;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 *
		 * If lock_task_sighand() fails we pretend the task dies
		 * after receiving the signal. The window is tiny, and the
		 * signal is private anyway.
		 */
		if (!error && sig && lock_task_sighand(p, &flags)) {
			error = specific_send_sig_info(sig, &info, p);
			unlock_task_sighand(p, &flags);
		}
	}
	rcu_read_unlock();

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
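/*
 * Illustrative userspace sketch (not kernel code): the si_code >= 0
 * check above is what lets the glibc sigqueue() wrapper through while
 * rejecting impersonation:
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, v);	// glibc fills si_code = SI_QUEUE (< 0)
 *
 * A caller forging si_code to SI_USER or SI_KERNEL gets -EPERM instead.
 */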
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
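/*
 * Illustrative consequence of the POSIX rule above (userspace sketch,
 * not kernel code): a blocked, pending signal vanishes once its action
 * becomes "ignore":
 *
 *	sigprocmask(SIG_BLOCK, &mask_with_SIGCHLD, NULL);
 *	// ... a SIGCHLD arrives and stays pending ...
 *	signal(SIGCHLD, SIG_IGN);	// the pending SIGCHLD is discarded here
 */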
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
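/*
 * Illustrative userspace sketch (not kernel code): the flag values
 * accepted above correspond to the classic sigaltstack() setup used to
 * survive stack-overflow SIGSEGVs:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,		// 0 is accepted like SS_ONSTACK, see above
 *	};
 *	sigaltstack(&ss, NULL);
 */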
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
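/*
 * Illustrative userspace sketch (not kernel code): the saved_sigmask
 * handling above is what makes the classic race-free wait work, with
 * "quit" assumed to be a volatile sig_atomic_t set by the handler:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!quit)
 *		sigsuspend(&old);	// atomically unblock SIGUSR1 and sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */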
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}