/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
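/*
 * Illustrative userspace sketch (annotation, not part of the original
 * source): the "stop(*)" rules above can be observed with plain POSIX
 * calls, where pid is any process you are allowed to signal:
 *
 *	kill(pid, SIGTSTP);	// queues a stop signal for pid
 *	kill(pid, SIGCONT);	// clears the pending SIGTSTP and resumes
 *				// pid, even if pid blocks or ignores
 *				// SIGCONT itself
 */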
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
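/*
 * Worked example (annotation, not in the original source): on i386
 * SIGTSTP is 20, so sig_kernel_stop(SIGTSTP) expands to
 * (20 < SIGRTMIN) && ((1UL << 19) & SIG_KERNEL_STOP_MASK), which is
 * nonzero because M(SIGTSTP) is part of SIG_KERNEL_STOP_MASK.  For any
 * real-time signal the (sig) < SIGRTMIN test fails, so all four
 * sig_kernel_*() predicates are false.
 */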
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
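/*
 * Example (annotation, not in the original source): if SIGTERM is set in
 * t->pending.signal but also in t->blocked, then for every word
 * signal->sig[i] &~ blocked->sig[i] is 0, has_pending_signals() returns 0
 * and TIF_SIGPENDING stays clear.  Whoever later removes SIGTERM from
 * t->blocked must call recalc_sigpending_tsk() again to raise the flag.
 */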
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
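/*
 * Worked example (annotation, not in the original source): signal n lives
 * in bit n-1 of its word.  With s[0] == 0x6 (SIGINT|SIGQUIT pending) and
 * m[0] == 0x2 (SIGINT blocked), x == 0x4; ffz(~x) returns the index of
 * the lowest set bit of x, here 2, so sig == 2 + 1 == 3 (SIGQUIT).
 */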
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(t->user);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		sighand_free(sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	rcu_read_lock();
	if (tsk->sighand != NULL) {
		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
		spin_lock(&sighand->siglock);
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
	}
	rcu_read_unlock();
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand;

	if (!atomic_read(&sig->count))
		BUG();
	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	rcu_read_unlock();
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.
		 */
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	atomic_dec(&tsk->signal->live);

	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
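/*
 * Hypothetical caller sketch (annotation, not part of the original source;
 * my_notifier, my_dev and dev->busy are made-up names) showing the
 * notifier protocol documented above:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->busy;		// 0 keeps the signal blocked
 *	}
 *
 *	sigset_t mask;
 *	siginitsetinv(&mask, 0);		// watch every signal
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical window ...
 *	unblock_all_signals();
 */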
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;
	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     int to_self,
				     int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
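/*
 * Example (annotation, not in the original source): two back-to-back
 * kill(pid, SIGUSR1) calls before the target runs leave only one SIGUSR1
 * pending -- LEGACY_QUEUE() sees the bit already set and the second send
 * is silently dropped.  A real-time signal such as SIGRTMIN+1 fails the
 * (sig) < SIGRTMIN test and therefore queues once per send.
 */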
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	}
	if (sigismember(&t->blocked, sig)) {
		sigdelset(&t->blocked, sig);
	}
	recalc_sigpending_tsk(t);
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	struct sighand_struct *sp;
	int ret;

retry:
	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
		spin_lock_irqsave(&sp->siglock, flags);
		if (p->sighand != sp) {
			spin_unlock_irqrestore(&sp->siglock, flags);
			goto retry;
		}
		if ((atomic_read(&sp->count) == 0) ||
		    (atomic_read(&p->usage) == 0)) {
			spin_unlock_irqrestore(&sp->siglock, flags);
			return -ESRCH;
		}
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&sp->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}
/* like kill_proc_info(), but doesn't use uid/euid of "current" */
int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
			  uid_t uid, uid_t euid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
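/*
 * Sketch of the intended calling pattern (annotation, not in the original
 * source), as the posix-timers code uses it:
 *
 *	timer_create():	tmr->sigq = sigqueue_alloc();	// may fail -> EAGAIN
 *	timer expiry:	send_sigqueue(sig, tmr->sigq, task);
 *			// cannot fail for lack of memory
 *	timer_delete():	sigqueue_free(tmr->sigq);
 */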
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;
	struct sighand_struct *sh;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out_err;
	}

retry:
	sh = rcu_dereference(p->sighand);

	spin_lock_irqsave(&sh->siglock, flags);
	if (p->sighand != sh) {
		/* We raced with exec() in a multithreaded process... */
		spin_unlock_irqrestore(&sh->siglock, flags);
		goto retry;
	}

	/*
	 * We do the check here again to handle the following scenario:
	 *
	 * CPU 0		CPU 1
	 * send_sigqueue
	 * check PF_EXITING
	 * interrupt		exit code running
	 *			__exit_signal
	 *			lock sighand->siglock
	 *			unlock sighand->siglock
	 * lock sh->siglock
	 * add(tsk->pending)	flush_sigqueue(tsk->pending)
	 *
	 */

	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&sh->siglock, flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}
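	/*
	 * Example of the encoding decoded above (annotation, not in the
	 * original source): exit_code 0x8b is "killed by signal 11, core
	 * dumped" -> CLD_DUMPED with si_status == SIGSEGV (11); exit_code
	 * 0x100 is exit(1) -> CLD_EXITED with si_status == 1.
	 */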
	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (to_self)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	int to_self;

	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
		to_self = 1;
	else if (stop_count == 0)
		to_self = 0;
	else
		BUG();

	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
	read_unlock(&tasklist_lock);

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (!t->exit_state &&
				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig or group_exit? */
			signr = current->exit_code;
			if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
				continue;
			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing.  */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
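/*
 * In-kernel usage sketch (annotation, not in the original source): a
 * kernel thread that wants to see only SIGTERM could block everything
 * else -- including SIGKILL, which the user-mode interface refuses:
 *
 *	sigset_t all_but_term;
 *	siginitsetinv(&all_but_term, sigmask(SIGTERM));
 *	sigprocmask(SIG_BLOCK, &all_but_term, NULL);
 */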
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));
		if (timeout) {
			/* None ready -- temporarily unblock the signals
			 * we're interested in while we sleep, so that
			 * we'll be awakened when they arrive. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}
	return ret;
}
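/*
 * Editor's note (not part of the original file): userspace sketch of the
 * syscall above via sigtimedwait(2).  The waited-for set is normally
 * blocked first so the signals queue instead of being delivered:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts);
 *		-- sig == SIGUSR1 on success; -1 with errno EAGAIN on timeout
 */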
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
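/*
 * Editor's note (not part of the original file): this SI_USER path is
 * what a plain kill(2) produces.  Signal 0 performs only the permission
 * and existence checks -- the probe described in do_tkill() below:
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		-- the process exists
 *	else if (errno == ESRCH)
 *		-- no such process
 */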
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process; this solves the
 *  problem of threads exiting and their PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
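/*
 * Editor's note (not part of the original file): userspace of this era
 * typically reaches these through raw syscall(2); tgkill is roughly what
 * NPTL's pthread_kill() uses underneath to target one thread:
 *
 *	#include <sys/syscall.h>
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *	syscall(SYS_tkill, tid, SIGUSR1);	-- older, racy wrt PID reuse
 */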
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info. */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
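/*
 * Editor's note (not part of the original file): sigqueue(3) reaches this
 * syscall with the negative si_code SI_QUEUE, which is why the
 * info.si_code >= 0 check above rejects forged kernel or kill() codes:
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);
 *		-- receiver sees si_code == SI_QUEUE, si_value.sival_int == 42
 */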
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
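/*
 * Editor's note (not part of the original file): the POSIX 3.3.1.3 rule
 * implemented above, observed from userspace -- switching to SIG_IGN
 * discards a pending signal even while it is blocked:
 *
 *	sigset_t s;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);			-- now pending
 *	signal(SIGUSR1, SIG_IGN);	-- pending SIGUSR1 is discarded
 */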
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note: this code used to test ss_flags incorrectly.
		 * Old code may have used ss_flags==0 to mean
		 * ss_flags==SS_ONSTACK (as that was the only way that
		 * worked), so this check preserves that older behaviour.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
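/*
 * Editor's note (not part of the original file): userspace sketch of the
 * interface above, as used to handle stack-overflow SIGSEGVs on an
 * alternate stack:
 *
 *	static char stk[SIGSTKSZ];
 *	stack_t ss = { .ss_sp = stk, .ss_size = sizeof(stk), .ss_flags = 0 };
 *	sigaltstack(&ss, NULL);
 *		-- then install the SIGSEGV handler with SA_ONSTACK
 */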
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask. */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
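/*
 * Editor's note (not part of the original file): the three `how` modes of
 * the switch above, as seen through sigprocmask(2):
 *
 *	sigprocmask(SIG_BLOCK, &s, NULL);	-- blocked |= s
 *	sigprocmask(SIG_UNBLOCK, &s, NULL);	-- blocked &= ~s
 *	sigprocmask(SIG_SETMASK, &s, &old);	-- blocked = s, old mask out
 *
 * SIGKILL and SIGSTOP are silently stripped from the caller's set before
 * it is applied, as enforced above.
 */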
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];
	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);	/* don't pass stack garbage */

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
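/*
 * Editor's note (not part of the original file): SA_ONESHOT | SA_NOMASK
 * above gives signal(2) its classic SysV behaviour -- the handler is
 * reset to SIG_DFL on delivery and the signal is not blocked while the
 * handler runs.  Hence the traditional (and racy) re-arm idiom:
 *
 *	void h(int sig)
 *	{
 *		signal(sig, h);		-- re-arm; prefer sigaction(2)
 *		...
 *	}
 */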
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
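/*
 * Editor's note (not part of the original file): pause(2) returns only
 * after a handled signal interrupts it, so "check a flag, then pause"
 * races with the signal.  Userspace avoids this with sigsuspend(2):
 *
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&old);	-- atomically unblock and sleep
 */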
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}