/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear it themselves.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; in that case the wakeup is a
 * harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
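
/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern. After changing current->blocked under ->siglock, TIF_SIGPENDING
 * has to be recomputed. "example_unblock_all" is a hypothetical helper.
 */
static void __maybe_unused example_unblock_all(void)
{
	spin_lock_irq(&current->sighand->siglock);
	sigemptyset(&current->blocked);	/* stop blocking everything */
	recalc_sigpending();		/* TIF_SIGPENDING may need to be set */
	spin_unlock_irq(&current->sighand->siglock);
}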
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
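
/*
 * Illustrative sketch (not part of the original file): with both SIGUSR1
 * (10) and SIGSEGV (11) pending and nothing blocked, the SYNCHRONOUS_MASK
 * filtering above makes next_signal() report SIGSEGV first, even though
 * SIGUSR1 has the lower number.
 */
static int __maybe_unused example_next_signal_priority(struct sigpending *pending)
{
	sigset_t none;

	sigemptyset(&none);
	return next_signal(pending, &none);	/* -> SIGSEGV, not SIGUSR1 */
}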
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
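
/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as ptrace detach might drop every pending jobctl stop/trap for a tracee.
 * "example_tracee" is a hypothetical name; the caller is assumed to hold a
 * reference keeping the task alive.
 */
static void __maybe_unused example_drop_jobctl(struct task_struct *example_tracee)
{
	spin_lock_irq(&example_tracee->sighand->siglock);
	task_clear_jobctl_pending(example_tracee, JOBCTL_PENDING_MASK);
	spin_unlock_irq(&example_tracee->sighand->siglock);
}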
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
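
/*
 * Illustrative sketch (not part of the original file): a kernel thread
 * that opted in to a signal (e.g. via allow_signal()) can drain anything
 * still queued once it no longer cares.
 */
static void __maybe_unused example_drain_signals(void)
{
	if (signal_pending(current))
		flush_signals(current);	/* drop anything still queued */
}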
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
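
/*
 * Illustrative sketch (not part of the original file): a long-running
 * kernel helper thread that must never react to any inherited signal
 * disposition. "example_helper_thread" is a hypothetical thread function.
 */
static int __maybe_unused example_helper_thread(void *unused)
{
	ignore_signals(current);	/* every handler becomes SIG_IGN */
	return 0;
}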
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
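
/*
 * Illustrative sketch (not part of the original file) of the notifier API
 * above: a driver blocks everything around a critical section but lets a
 * fatal signal be acted upon after all. "example_notifier" and
 * "example_sigs" are hypothetical names.
 */
static int __maybe_unused example_notifier(void *priv)
{
	return fatal_signal_pending(current);	/* non-zero: deliver anyway */
}

static void __maybe_unused example_block(sigset_t *example_sigs)
{
	sigfillset(example_sigs);
	block_all_signals(example_notifier, NULL, example_sigs);
	/* ... critical section ... */
	unblock_all_signals();
}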
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
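
/*
 * Illustrative sketch (not part of the original file): the shape of the
 * call an architecture page-fault handler makes when a user access faults;
 * the field values follow the usual SIGSEGV/SEGV_MAPERR convention.
 */
static void __maybe_unused example_fault_to_sigsegv(unsigned long fault_addr)
{
	struct siginfo info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_MAPERR;		/* address not mapped */
	info.si_addr  = (void __user *)fault_addr;
	force_sig_info(SIGSEGV, &info, current);
}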
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
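
/*
 * Illustrative sketch (not part of the original file): the canonical
 * caller pattern for the helper above, via the lock_task_sighand()
 * wrapper; a NULL return means the task is already dead and has no
 * sighand to lock.
 */
static void __maybe_unused example_with_sighand(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		/* ... p->sighand is stable here ... */
		unlock_task_sighand(p, &flags);
	}
}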
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
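
/*
 * Illustrative sketch (not part of the original file): a driver completing
 * async I/O might poke the owning task with SIGIO; priv=1 marks the signal
 * as kernel-generated (SEND_SIG_PRIV).
 */
static void __maybe_unused example_send_sigio(struct task_struct *owner)
{
	send_sig(SIGIO, owner, 1);
}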
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
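
/*
 * Illustrative sketch (not part of the original file): these exported
 * helpers take a struct pid, so a module holding a pid reference can
 * signal without ever resolving a task pointer itself.
 */
static void __maybe_unused example_kill_by_pid(struct pid *target)
{
	kill_pid(target, SIGTERM, 1);	/* 1 = privileged/kernel-internal */
}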
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
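
/*
 * Illustrative sketch (not part of the original file): the POSIX-timer
 * style lifecycle of a preallocated sigqueue. Allocation can fail up
 * front (reportable as EAGAIN), after which delivery itself never
 * allocates. The field values here are hypothetical.
 */
static int __maybe_unused example_prealloc_cycle(struct task_struct *t)
{
	struct sigqueue *q = sigqueue_alloc();

	if (!q)
		return -EAGAIN;		/* report at setup time, not delivery */
	q->info.si_signo = SIGRTMIN;
	q->info.si_code = SI_TIMER;
	send_sigqueue(q, t, 0);		/* deliver to one thread */
	sigqueue_free(q);		/* safe even if still queued */
	return 0;
}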
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
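
/*
 * Illustrative sketch (not part of the original file): the shape of the
 * exit path. Under tasklist_lock, a dying group leader reports to its
 * parent and learns whether it should self-reap instead of lingering as
 * a zombie. "example_exit_notify" is a hypothetical name.
 */
static void __maybe_unused example_exit_notify(struct task_struct *tsk)
{
	bool autoreap = do_notify_parent(tsk, tsk->exit_signal);

	if (autoreap)
		tsk->exit_state = EXIT_DEAD;	/* no wait4 will reap us */
}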
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
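
/*
 * Illustrative sketch (not part of the original file): how callers encode
 * a ptrace event into exit_code - the event number in the second byte and
 * SIGTRAP in the first - e.g. the shape of exec reporting.
 */
static void __maybe_unused example_report_exec(void)
{
	if (current->ptrace & PT_TRACE_EXEC)
		ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
}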
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(info->si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}

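/*
 * Illustrative arch-side caller (a sketch; function names and details
 * vary per architecture): an arch's do_signal() typically does
 *
 *	siginfo_t info;
 *	struct k_sigaction ka;
 *	int signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *
 *	if (signr > 0)
 *		handle_signal(signr, &info, &ka, regs);
 *
 * where handle_signal() builds the user-mode signal frame. A zero
 * return means there is no signal to deliver to user-mode.
 */
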
/**
 * signal_delivered - called once a signal has been delivered
 * @sig:		number of signal being delivered
 * @info:		siginfo_t of signal being delivered
 * @ka:			sigaction setting that chose the handler
 * @regs:		user register state
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ka->sa.sa_flags).  Tracing is notified.
 */
void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
			struct pt_regs *regs, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(sig, info, ka, regs, stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
			signal_pt_regs(), stepping);
}

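/*
 * Illustrative arch-side pattern (a sketch; the frame-setup helper name
 * varies per architecture):
 *
 *	failed = setup_rt_frame(ksig, regs);
 *	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 *
 * i.e. a failed frame setup forces SIGSEGV instead of completing
 * delivery.
 */
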
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}

/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of signals to block/unblock (if non-null)
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

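/*
 * Illustrative userspace usage via the glibc wrapper (not kernel code):
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);     SIGINT now blocked
 *	...critical section...
 *	sigprocmask(SIG_SETMASK, &old, NULL);   previous mask restored
 *
 * Attempts to block SIGKILL/SIGSTOP are silently dropped, as above.
 */
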
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif

static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}

/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}

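/*
 * Illustrative userspace usage (glibc wrapper); the deferred handler
 * below is a hypothetical stand-in for whatever the caller does:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGTERM))
 *		handle_deferred_sigterm();
 *
 * Only signals that are both pending and blocked show up, per the
 * sigandsets() against ->blocked above.
 */
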
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in so that we'll be awakened when they arrive. Unblocking
		 * is always fine, we can avoid set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = freezable_schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		siginitset(&tsk->real_blocked, 0);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}

/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

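/*
 * Illustrative userspace usage (glibc sigtimedwait()); the signal must
 * be blocked first or it may be delivered to a handler instead:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		puts("timed out");
 */
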
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}

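/*
 * Illustrative userspace usage: signal 0 exercises the null-signal
 * permission/existence probe described in do_send_specific() below:
 *
 *	if (kill(pid, 0) == 0)
 *		printf("%d exists and is signalable\n", pid);
 *	else if (errno == ESRCH)
 *		printf("%d does not exist\n", pid);
 *	else if (errno == EPERM)
 *		printf("%d exists but we lack permission\n", pid);
 */
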
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group. This solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

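/*
 * Illustrative userspace usage; glibc provides no wrapper, so this goes
 * through syscall(2). my_worker_tid is a hypothetical stand-in for a
 * thread ID the caller obtained, e.g. via syscall(SYS_gettid):
 *
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_tgkill, getpid(), my_worker_tid, SIGUSR1);
 */
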
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid)) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - send signal information to a process
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

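/*
 * Illustrative userspace usage: the usual entry point is sigqueue(3),
 * which fills in si_code = SI_QUEUE (negative, so it passes the check
 * in do_rt_sigqueueinfo()) plus a caller-supplied payload:
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, val);
 *
 * The receiver sees the payload in si_value of its siginfo_t.
 */
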
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid)) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
			} while_each_thread(current, t);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

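/*
 * Illustrative userspace consequence of the POSIX rule above:
 *
 *	sigset_t s;
 *
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	kill(getpid(), SIGCHLD);          SIGCHLD is now pending
 *	signal(SIGCHLD, SIG_IGN);         pending SIGCHLD is discarded
 *
 * After the signal() call, sigpending() no longer reports SIGCHLD even
 * though it was blocked the whole time.
 */
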
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}

SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
}

int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}

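/*
 * Illustrative userspace setup of an alternate stack (glibc wrappers),
 * typically paired with SA_ONSTACK so e.g. a SIGSEGV caused by stack
 * overflow can still run its handler. segv_handler is caller-provided:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */
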
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif /* CONFIG_COMPAT */
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset until a
 *	signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

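/*
 * Illustrative userspace usage: the classic race-free wait. The flag is
 * only examined while the signal is blocked, and sigsuspend() unblocks
 * it and sleeps atomically (got_usr1 being set by a SIGUSR1 handler):
 *
 *	volatile sig_atomic_t got_usr1;
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);               returns -1 with EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */
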
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */