/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/aio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
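/*
 * Editorial sketch (not in the original file): the first-word special
 * case above is plain bit arithmetic.  If SIGSEGV and SIGUSR1 are both
 * pending and unblocked in word 0, masking with SYNCHRONOUS_MASK leaves
 * only the SIGSEGV bit, so the synchronous fault is serviced first:
 *
 *	unsigned long x = sigmask(SIGSEGV) | sigmask(SIGUSR1);
 *	if (x & SYNCHRONOUS_MASK)
 *		x &= SYNCHRONOUS_MASK;	// only sigmask(SIGSEGV) survives
 *	sig = ffz(~x) + 1;		// == SIGSEGV
 */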
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
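/*
 * Illustrative userspace sketch (editorial addition, not kernel code):
 * the per-user limit policed above is the same one sigqueue(3) runs
 * into.  With the signal blocked, every successful sigqueue() pins one
 * struct sigqueue until RLIMIT_SIGPENDING is exhausted:
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	while (sigqueue(getpid(), SIGRTMIN, (union sigval){ 0 }) == 0)
 *		;	// queues until the rlimit is hit
 *	// errno == EAGAIN: __sigqueue_alloc() refused the allocation
 */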
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
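/*
 * Editorial note: flush_signal_handlers() with force_default == 0 is the
 * kernel side of the POSIX execve() rule - caught signals revert to the
 * default action, ignored signals stay ignored.  Userspace sketch of the
 * visible effect:
 *
 *	signal(SIGTERM, handler);	// caught ...
 *	signal(SIGINT, SIG_IGN);	// ... and ignored
 *	execv("/bin/sleep", argv);
 *	// in the new image: SIGTERM is back to SIG_DFL (handler is gone),
 *	// SIGINT is still SIG_IGN
 */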
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
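/*
 * Illustrative userspace sketch (editorial addition): the SIGALRM
 * special case above is what keeps a periodic setitimer(2) timer
 * self-restarting even though SIGALRM, being a legacy signal, is only
 * ever queued once:
 *
 *	struct itimerval it = {
 *		.it_value    = { .tv_usec = 100000 },
 *		.it_interval = { .tv_usec = 100000 },
 *	};
 *	setitimer(ITIMER_REAL, &it, NULL);
 *	// each dequeue of SIGALRM re-arms signal->real_timer by
 *	// it_real_incr, so expirations keep coming without re-arming
 */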
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
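/*
 * Editorial sketch: the thread-selection logic above is why the usual
 * userspace pattern for funnelling a process-directed signal to one
 * dedicated thread is to make every other thread fail wants_signal() by
 * blocking the signal there:
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	pthread_sigmask(SIG_BLOCK, &set, NULL);	// inherited by new threads
 *	pthread_create(&tid, NULL, worker, NULL);
 *	// every thread blocks SIGUSR1, so wants_signal() fails for all of
 *	// them and the signal simply stays queued until the dedicated
 *	// worker thread dequeues it with sigwait(&set, &sig)
 */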
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
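/*
 * Editorial sketch: legacy_queue() implements the classic coalescing
 * rule - a non-realtime signal that is already pending is dropped, while
 * realtime signals queue once per send.  With both signals blocked in
 * the receiver:
 *
 *	kill(pid, SIGUSR1);		// pending
 *	kill(pid, SIGUSR1);		// coalesced, delivered only once
 *	sigqueue(pid, SIGRTMIN, v1);	// queued
 *	sigqueue(pid, SIGRTMIN, v2);	// queued again, delivered twice
 */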
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
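/*
 * Editorial sketch of the pid conventions handled above; they mirror
 * kill(2):
 *
 *	kill(1234, SIGTERM);	// pid > 0:   exactly one process
 *	kill(0, SIGTERM);	// pid == 0:  the caller's process group
 *	kill(-5678, SIGTERM);	// pid < -1:  process group 5678
 *	kill(-1, SIGTERM);	// pid == -1: everything the caller may
 *				// signal, except init and itself
 */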
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
/* io_send_sig: send a signal caused by an i/o operation
 *
 * Use this helper when a signal is being sent to the task that is responsible
 * for an I/O-initiated operation.  Most commonly this is used to send signals
 * like SIGPIPE or SIGXFSZ that are the result of attempting a read or write
 * operation.  This is used by aio to direct a signal to the correct task in
 * the case of async operations.
 */
int io_send_sig(int sig)
{
	struct task_struct *task = current;
#if IS_ENABLED(CONFIG_AIO)
	task = aio_get_task(task->kiocb);
#endif
	return send_sig(sig, task, 0);
}
EXPORT_SYMBOL(io_send_sig);
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
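/*
 * Illustrative userspace sketch (editorial addition): the preallocation
 * happens at timer_create(2) time, which is why queue-space exhaustion
 * is reportable as EAGAIN there instead of becoming a lost expiration
 * later:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t timer;
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &timer) == -1 &&
 *	    errno == EAGAIN) {
 *		// sigqueue_alloc() failed; no timer was created
 *	}
 */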
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
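/*
 * Illustrative userspace sketch (editorial addition): the autoreap path
 * above is the kernel side of the POSIX "SIGCHLD set to SIG_IGN means no
 * zombies" idiom:
 *
 *	signal(SIGCHLD, SIG_IGN);	// or sa_flags |= SA_NOCLDWAIT
 *	if (fork() == 0)
 *		_exit(0);		// child is reaped automatically
 *	// wait() eventually fails with ECHILD once no children remain -
 *	// the "blocked sys_wait4 might now return -ECHILD" case above
 */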
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2150 int get_signal(struct ksignal *ksig)
2152 struct sighand_struct *sighand = current->sighand;
2153 struct signal_struct *signal = current->signal;
2156 if (unlikely(current->task_works))
2159 if (unlikely(uprobe_deny_signal()))
2163 * Do this once, we can't return to user-mode if freezing() == T.
2164 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2165 * thus do not need another check after return.
2170 spin_lock_irq(&sighand->siglock);
2172 * Every stopped thread goes here after wakeup. Check to see if
2173 * we should notify the parent, prepare_signal(SIGCONT) encodes
2174 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2176 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2179 if (signal->flags & SIGNAL_CLD_CONTINUED)
2180 why = CLD_CONTINUED;
2184 signal->flags &= ~SIGNAL_CLD_MASK;
2186 spin_unlock_irq(&sighand->siglock);
2189 * Notify the parent that we're continuing. This event is
2190 * always per-process and doesn't make whole lot of sense
2191 * for ptracers, who shouldn't consume the state via
2192 * wait(2) either, but, for backward compatibility, notify
2193 * the ptracer of the group leader too unless it's gonna be
2196 read_lock(&tasklist_lock);
2197 do_notify_parent_cldstop(current, false, why);
2199 if (ptrace_reparented(current->group_leader))
2200 do_notify_parent_cldstop(current->group_leader,
2202 read_unlock(&tasklist_lock);
2208 struct k_sigaction *ka;
2210 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2214 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2216 spin_unlock_irq(&sighand->siglock);
2220 signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
2223 break; /* will return 0 */
2225 if (unlikely(current->ptrace) && signr != SIGKILL) {
2226 signr = ptrace_signal(signr, &ksig->info);
2231 ka = &sighand->action[signr-1];
2233 /* Trace actually delivered signals. */
2234 trace_signal_deliver(signr, &ksig->info, ka);
2236 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2238 if (ka->sa.sa_handler != SIG_DFL) {
2239 /* Run the handler. */
2242 if (ka->sa.sa_flags & SA_ONESHOT)
2243 ka->sa.sa_handler = SIG_DFL;
2245 break; /* will return non-zero "signr" value */
2249 * Now we are doing the default action for this signal.
2251 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2255 * Global init gets no signals it doesn't want.
2256 * Container-init gets no signals it doesn't want from same
2259 * Note that if global/container-init sees a sig_kernel_only()
2260 * signal here, the signal must have been generated internally
2261 * or must have come from an ancestor namespace. In either
2262 * case, the signal cannot be dropped.
2264 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2265 !sig_kernel_only(signr))
2268 if (sig_kernel_stop(signr)) {
2270 * The default action is to stop all threads in
2271 * the thread group. The job control signals
2272 * do nothing in an orphaned pgrp, but SIGSTOP
2273 * always works. Note that siglock needs to be
2274 * dropped during the call to is_orphaned_pgrp()
2275 * because of lock ordering with tasklist_lock.
2276 * This allows an intervening SIGCONT to be posted.
2277 * We need to check for that and bail out if necessary.
2279 if (signr != SIGSTOP) {
2280 spin_unlock_irq(&sighand->siglock);
2282 /* signals can be posted during this window */
2284 if (is_current_pgrp_orphaned())
2287 spin_lock_irq(&sighand->siglock);
2290 if (likely(do_signal_stop(ksig->info.si_signo))) {
2291 /* It released the siglock. */
2296 * We didn't actually stop, due to a race
2297 * with SIGCONT or something like that.
2302 spin_unlock_irq(&sighand->siglock);
2305 * Anything else is fatal, maybe with a core dump.
2307 current->flags |= PF_SIGNALED;
2309 if (sig_kernel_coredump(signr)) {
2310 if (print_fatal_signals)
2311 print_fatal_signal(ksig->info.si_signo);
2312 proc_coredump_connector(current);
2314 * If it was able to dump core, this kills all
2315 * other threads in the group and synchronizes with
2316 * their demise. If we lost the race with another
2317 * thread getting here, it set group_exit_code
2318 * first and our do_group_exit call below will use
2319 * that value and ignore the one we pass it.
2321 do_coredump(&ksig->info);
2325 * Death signals, no core dump.
2327 do_group_exit(ksig->info.si_signo);
2330 spin_unlock_irq(&sighand->siglock);
2333 return ksig->sig > 0;
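/*
 * Illustrative sketch (not part of this file): get_signal() is the entry
 * point for architecture signal-delivery code. A typical arch do_signal()
 * loop looks roughly like this; handle_signal() stands in for the
 * arch-specific frame-setup helper:
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			handle_signal(&ksig, regs);
 *			return;
 *		}
 *		restore_saved_sigmask();
 *	}
 *
 * On the handler path, handle_signal() is expected to finish by calling
 * signal_setup_done(), which lands in the helpers below.
 */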
2337 * signal_delivered - bookkeeping after a signal has been delivered
2338 * @ksig: kernel signal struct
2339 * @stepping: nonzero if debugger single-step or block-step in use
2341 * This function should be called when a signal has successfully been
2342 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2343 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2344 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2346 static void signal_delivered(struct ksignal *ksig, int stepping)
2350 /* A signal was successfully delivered, and the
2351 saved sigmask was stored on the signal frame,
2352 and will be restored by sigreturn. So we can
2353 simply clear the restore sigmask flag. */
2354 clear_restore_sigmask();
2356 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2357 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2358 sigaddset(&blocked, ksig->sig);
2359 set_current_blocked(&blocked);
2360 tracehook_signal_handler(stepping);
2363 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2366 force_sigsegv(ksig->sig, current);
2368 signal_delivered(ksig, stepping);
2372 * It could be that complete_signal() picked us to notify about the
2373 * group-wide signal. Other threads should be notified now to take
2374 * the shared signals in @which since we will not.
2376 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2379 struct task_struct *t;
2381 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2382 if (sigisemptyset(&retarget))
2386 while_each_thread(tsk, t) {
2387 if (t->flags & PF_EXITING)
2390 if (!has_pending_signals(&retarget, &t->blocked))
2392 /* Remove the signals this thread can handle. */
2393 sigandsets(&retarget, &retarget, &t->blocked);
2395 if (!signal_pending(t))
2396 signal_wake_up(t, 0);
2398 if (sigisemptyset(&retarget))
2403 void exit_signals(struct task_struct *tsk)
2409 * @tsk is about to have PF_EXITING set - lock out users which
2410 * expect a stable threadgroup.
2412 threadgroup_change_begin(tsk);
2414 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2415 tsk->flags |= PF_EXITING;
2416 threadgroup_change_end(tsk);
2420 spin_lock_irq(&tsk->sighand->siglock);
2422 * From now on, this task is not visible to group-wide signals;
2423 * see wants_signal() and do_signal_stop().
2425 tsk->flags |= PF_EXITING;
2427 threadgroup_change_end(tsk);
2429 if (!signal_pending(tsk))
2432 unblocked = tsk->blocked;
2433 signotset(&unblocked);
2434 retarget_shared_pending(tsk, &unblocked);
2436 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2437 task_participate_group_stop(tsk))
2438 group_stop = CLD_STOPPED;
2440 spin_unlock_irq(&tsk->sighand->siglock);
2443 * If group stop has completed, deliver the notification. This
2444 * should always go to the real parent of the group leader.
2446 if (unlikely(group_stop)) {
2447 read_lock(&tasklist_lock);
2448 do_notify_parent_cldstop(tsk, false, group_stop);
2449 read_unlock(&tasklist_lock);
2453 EXPORT_SYMBOL(recalc_sigpending);
2454 EXPORT_SYMBOL_GPL(dequeue_signal);
2455 EXPORT_SYMBOL(flush_signals);
2456 EXPORT_SYMBOL(force_sig);
2457 EXPORT_SYMBOL(send_sig);
2458 EXPORT_SYMBOL(send_sig_info);
2459 EXPORT_SYMBOL(sigprocmask);
2462 * System call entry points.
2466 * sys_restart_syscall - restart a system call
2468 SYSCALL_DEFINE0(restart_syscall)
2470 struct restart_block *restart = &current->restart_block;
2471 return restart->fn(restart);
2474 long do_no_restart_syscall(struct restart_block *param)
2479 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2481 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2482 sigset_t newblocked;
2483 /* A set of now blocked but previously unblocked signals. */
2484 sigandnsets(&newblocked, newset, &current->blocked);
2485 retarget_shared_pending(tsk, &newblocked);
2487 tsk->blocked = *newset;
2488 recalc_sigpending();
2492 * set_current_blocked - change current->blocked mask
2495 * It is wrong to change ->blocked directly; this helper should be used
2496 * to ensure the process can't miss a shared signal we are going to block.
2498 void set_current_blocked(sigset_t *newset)
2500 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2501 __set_current_blocked(newset);
2504 void __set_current_blocked(const sigset_t *newset)
2506 struct task_struct *tsk = current;
2508 spin_lock_irq(&tsk->sighand->siglock);
2509 __set_task_blocked(tsk, newset);
2510 spin_unlock_irq(&tsk->sighand->siglock);
2514 * This is also useful for kernel threads that want to temporarily
2515 * (or permanently) block certain signals.
2517 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2518 * interface happily blocks "unblockable" signals like SIGKILL
2519 * and SIGSTOP.
2521 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2523 struct task_struct *tsk = current;
2526 /* Lockless: only current can change ->blocked, never from irq */
2528 *oldset = tsk->blocked;
2532 sigorsets(&newset, &tsk->blocked, set);
2535 sigandnsets(&newset, &tsk->blocked, set);
2544 __set_current_blocked(&newset);
2549 * sys_rt_sigprocmask - change the list of currently blocked signals
2550 * @how: whether to add, remove, or set signals
2551 * @nset: signals to add or remove (if non-null)
2552 * @oset: previous value of signal mask if non-null
2553 * @sigsetsize: size of sigset_t type
2555 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2556 sigset_t __user *, oset, size_t, sigsetsize)
2558 sigset_t old_set, new_set;
2561 /* XXX: Don't preclude handling different sized sigset_t's. */
2562 if (sigsetsize != sizeof(sigset_t))
2565 old_set = current->blocked;
2568 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2570 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2572 error = sigprocmask(how, &new_set, NULL);
2578 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
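/*
 * Example (illustrative, not part of this file): userspace reaches
 * rt_sigprocmask through the sigprocmask(2) wrapper. A minimal sketch
 * that blocks SIGINT around a critical section:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	... critical section: an arriving SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * Note that attempts to block SIGKILL/SIGSTOP are silently dropped by
 * the sigdelsetmask() above rather than reported as an error.
 */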
2585 #ifdef CONFIG_COMPAT
2586 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2587 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2590 sigset_t old_set = current->blocked;
2592 /* XXX: Don't preclude handling different sized sigset_t's. */
2593 if (sigsetsize != sizeof(sigset_t))
2597 compat_sigset_t new32;
2600 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2603 sigset_from_compat(&new_set, &new32);
2604 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2606 error = sigprocmask(how, &new_set, NULL);
2611 compat_sigset_t old32;
2612 sigset_to_compat(&old32, &old_set);
2613 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2618 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2619 (sigset_t __user *)oset, sigsetsize);
2624 static int do_sigpending(void *set, unsigned long sigsetsize)
2626 if (sigsetsize > sizeof(sigset_t))
2629 spin_lock_irq(&current->sighand->siglock);
2630 sigorsets(set, &current->pending.signal,
2631 &current->signal->shared_pending.signal);
2632 spin_unlock_irq(&current->sighand->siglock);
2634 /* Outside the lock because only this thread touches it. */
2635 sigandsets(set, &current->blocked, set);
2640 * sys_rt_sigpending - examine a pending signal that has been raised
2641 * while blocked
2642 * @uset: stores pending signals
2643 * @sigsetsize: size of sigset_t type or larger
2645 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2648 int err = do_sigpending(&set, sigsetsize);
2649 if (!err && copy_to_user(uset, &set, sigsetsize))
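/*
 * Example (illustrative, not part of this file): sigpending(2) is the
 * usual way to reach this. With SIGINT blocked as in the sketch above,
 * a pending-but-blocked SIGINT can be observed like so:
 *
 *	sigset_t pend;
 *
 *	if (sigpending(&pend) == 0 && sigismember(&pend, SIGINT))
 *		printf("SIGINT is pending\n");
 */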
2654 #ifdef CONFIG_COMPAT
2655 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2656 compat_size_t, sigsetsize)
2660 int err = do_sigpending(&set, sigsetsize);
2662 compat_sigset_t set32;
2663 sigset_to_compat(&set32, &set);
2664 /* we can get here only if sigsetsize <= sizeof(set) */
2665 if (copy_to_user(uset, &set32, sigsetsize))
2670 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2675 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2677 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2681 if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2683 if (from->si_code < 0)
2684 return __copy_to_user(to, from, sizeof(siginfo_t))
2685 ? -EFAULT : 0;
2687 * If you change the siginfo_t structure, please be sure
2688 * this code is fixed accordingly.
2689 * Please remember to update the signalfd_copyinfo() function
2690 * inside fs/signalfd.c too, in case siginfo_t changes.
2691 * It should never copy any pad contained in the structure
2692 * to avoid security leaks, but must copy the generic
2693 * 3 ints plus the relevant union member.
2695 err = __put_user(from->si_signo, &to->si_signo);
2696 err |= __put_user(from->si_errno, &to->si_errno);
2697 err |= __put_user((short)from->si_code, &to->si_code);
2698 switch (from->si_code & __SI_MASK) {
2700 err |= __put_user(from->si_pid, &to->si_pid);
2701 err |= __put_user(from->si_uid, &to->si_uid);
2704 err |= __put_user(from->si_tid, &to->si_tid);
2705 err |= __put_user(from->si_overrun, &to->si_overrun);
2706 err |= __put_user(from->si_ptr, &to->si_ptr);
2709 err |= __put_user(from->si_band, &to->si_band);
2710 err |= __put_user(from->si_fd, &to->si_fd);
2713 err |= __put_user(from->si_addr, &to->si_addr);
2714 #ifdef __ARCH_SI_TRAPNO
2715 err |= __put_user(from->si_trapno, &to->si_trapno);
2717 #ifdef BUS_MCEERR_AO
2719 * Other callers might not initialize the si_lsb field,
2720 * so check explicitly for the right codes here.
2722 if (from->si_signo == SIGBUS &&
2723 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2724 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2727 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2728 err |= __put_user(from->si_lower, &to->si_lower);
2729 err |= __put_user(from->si_upper, &to->si_upper);
2734 err |= __put_user(from->si_pid, &to->si_pid);
2735 err |= __put_user(from->si_uid, &to->si_uid);
2736 err |= __put_user(from->si_status, &to->si_status);
2737 err |= __put_user(from->si_utime, &to->si_utime);
2738 err |= __put_user(from->si_stime, &to->si_stime);
2740 case __SI_RT: /* This is not generated by the kernel as of now. */
2741 case __SI_MESGQ: /* But this is */
2742 err |= __put_user(from->si_pid, &to->si_pid);
2743 err |= __put_user(from->si_uid, &to->si_uid);
2744 err |= __put_user(from->si_ptr, &to->si_ptr);
2746 #ifdef __ARCH_SIGSYS
2748 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2749 err |= __put_user(from->si_syscall, &to->si_syscall);
2750 err |= __put_user(from->si_arch, &to->si_arch);
2753 default: /* this is just in case for now ... */
2754 err |= __put_user(from->si_pid, &to->si_pid);
2755 err |= __put_user(from->si_uid, &to->si_uid);
2764 * do_sigtimedwait - wait for queued signals specified in @which
2765 * @which: queued signals to wait for
2766 * @info: if non-null, the signal's siginfo is returned here
2767 * @ts: upper bound on process time suspension
2769 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2770 const struct timespec *ts)
2772 struct task_struct *tsk = current;
2773 long timeout = MAX_SCHEDULE_TIMEOUT;
2774 sigset_t mask = *which;
2778 if (!timespec_valid(ts))
2780 timeout = timespec_to_jiffies(ts);
2782 * We can be close to the next tick, add another one
2783 * to ensure we will wait at least the time asked for.
2785 if (ts->tv_sec || ts->tv_nsec)
2786 timeout++;
2790 * Invert the set of allowed signals to get those we want to block.
2792 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2793 signotset(&mask);
2795 spin_lock_irq(&tsk->sighand->siglock);
2796 sig = dequeue_signal(tsk, &mask, info);
2797 if (!sig && timeout) {
2799 * None ready: temporarily unblock the signals we're interested
2800 * in while we sleep, so that we'll be awakened when they
2801 * arrive. Unblocking is always fine; we can avoid
2802 * set_current_blocked().
2804 tsk->real_blocked = tsk->blocked;
2805 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2806 recalc_sigpending();
2807 spin_unlock_irq(&tsk->sighand->siglock);
2809 timeout = freezable_schedule_timeout_interruptible(timeout);
2811 spin_lock_irq(&tsk->sighand->siglock);
2812 __set_task_blocked(tsk, &tsk->real_blocked);
2813 sigemptyset(&tsk->real_blocked);
2814 sig = dequeue_signal(tsk, &mask, info);
2816 spin_unlock_irq(&tsk->sighand->siglock);
2820 return timeout ? -EINTR : -EAGAIN;
2824 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2825 * in @uthese
2826 * @uthese: queued signals to wait for
2827 * @uinfo: if non-null, the signal's siginfo is returned here
2828 * @uts: upper bound on process time suspension
2829 * @sigsetsize: size of sigset_t type
2831 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2832 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2840 /* XXX: Don't preclude handling different sized sigset_t's. */
2841 if (sigsetsize != sizeof(sigset_t))
2844 if (copy_from_user(&these, uthese, sizeof(these)))
2848 if (copy_from_user(&ts, uts, sizeof(ts)))
2852 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2854 if (ret > 0 && uinfo) {
2855 if (copy_siginfo_to_user(uinfo, &info))
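/*
 * Example (illustrative, not part of this file): the sigtimedwait(2)
 * wrapper maps onto this syscall. The signal must be blocked first, or
 * it may be delivered to a handler instead of being picked up here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 *
 * A NULL timeout (via sigwaitinfo(2)) waits indefinitely; on timeout
 * the call fails with EAGAIN, matching the return path above.
 */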
2863 * sys_kill - send a signal to a process
2864 * @pid: the PID of the process
2865 * @sig: signal to be sent
2867 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2869 struct siginfo info;
2871 info.si_signo = sig;
2873 info.si_code = SI_USER;
2874 info.si_pid = task_tgid_vnr(current);
2875 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2877 return kill_something_info(sig, &info, pid);
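/*
 * Example (illustrative, not part of this file): the @pid argument of
 * kill(2) selects the destination set, which kill_something_info()
 * decodes:
 *
 *	kill(1234, SIGTERM);	destination: the single process 1234
 *	kill(0, SIGTERM);	every process in the caller's group
 *	kill(-5678, SIGHUP);	every process in group 5678
 *	kill(1234, 0);		permission/existence probe, no signal sent
 */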
2881 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2883 struct task_struct *p;
2887 p = find_task_by_vpid(pid);
2888 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2889 error = check_kill_permission(sig, info, p);
2891 * The null signal is a permissions and process existence
2892 * probe. No signal is actually delivered.
2894 if (!error && sig) {
2895 error = do_send_sig_info(sig, info, p, false);
2897 * If lock_task_sighand() failed we pretend the task
2898 * dies after receiving the signal. The window is tiny,
2899 * and the signal is private anyway.
2901 if (unlikely(error == -ESRCH))
2910 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2912 struct siginfo info = {};
2914 info.si_signo = sig;
2916 info.si_code = SI_TKILL;
2917 info.si_pid = task_tgid_vnr(current);
2918 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2920 return do_send_specific(tgid, pid, sig, &info);
2924 * sys_tgkill - send signal to one specific thread
2925 * @tgid: the thread group ID of the thread
2926 * @pid: the PID of the thread
2927 * @sig: signal to be sent
2929 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2930 * exists but no longer belongs to the target process. This
2931 * method solves the problem of threads exiting and PIDs getting reused.
2933 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2935 /* This is only valid for single tasks */
2936 if (pid <= 0 || tgid <= 0)
2939 return do_tkill(tgid, pid, sig);
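/*
 * Example (illustrative, not part of this file): older glibc exposes no
 * tgkill() wrapper, so callers typically go through syscall(2). The
 * send_to_thread() name is made up for this sketch:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int send_to_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 *
 * The tid here is a kernel thread id as returned by gettid(), not a
 * pthread_t.
 */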
2943 * sys_tkill - send signal to one specific task
2944 * @pid: the PID of the task
2945 * @sig: signal to be sent
2947 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2949 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2951 /* This is only valid for single tasks */
2955 return do_tkill(0, pid, sig);
2958 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2960 /* Not even root can pretend to send signals from the kernel.
2961 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2963 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2964 (task_pid_vnr(current) != pid))
2967 info->si_signo = sig;
2969 /* POSIX.1b doesn't mention process groups. */
2970 return kill_proc_info(sig, info, pid);
2974 * sys_rt_sigqueueinfo - send signal information to a process
2975 * @pid: the PID of the thread
2976 * @sig: signal to be sent
2977 * @uinfo: signal info to be sent
2979 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2980 siginfo_t __user *, uinfo)
2983 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2985 return do_rt_sigqueueinfo(pid, sig, &info);
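/*
 * Example (illustrative, not part of this file): sigqueue(3) is the
 * portable entry to this syscall; it fills in si_code = SI_QUEUE (a
 * negative value), which is why the check above lets it through:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGUSR1, v);
 *
 * target_pid is an assumed variable; the receiver sees the payload in
 * si_value if its handler was installed with SA_SIGINFO.
 */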
2988 #ifdef CONFIG_COMPAT
2989 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2992 struct compat_siginfo __user *, uinfo)
2994 siginfo_t info = {};
2995 int ret = copy_siginfo_from_user32(&info, uinfo);
2998 return do_rt_sigqueueinfo(pid, sig, &info);
3002 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3004 /* This is only valid for single tasks */
3005 if (pid <= 0 || tgid <= 0)
3008 /* Not even root can pretend to send signals from the kernel.
3009 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3011 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3012 (task_pid_vnr(current) != pid))
3015 info->si_signo = sig;
3017 return do_send_specific(tgid, pid, sig, info);
3020 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3021 siginfo_t __user *, uinfo)
3025 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3028 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3031 #ifdef CONFIG_COMPAT
3032 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3036 struct compat_siginfo __user *, uinfo)
3038 siginfo_t info = {};
3040 if (copy_siginfo_from_user32(&info, uinfo))
3042 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3047 * For kthreads only; must not be used if the thread was cloned with CLONE_SIGHAND
3049 void kernel_sigaction(int sig, __sighandler_t action)
3051 spin_lock_irq(&current->sighand->siglock);
3052 current->sighand->action[sig - 1].sa.sa_handler = action;
3053 if (action == SIG_IGN) {
3057 sigaddset(&mask, sig);
3059 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3060 flush_sigqueue_mask(&mask, &current->pending);
3061 recalc_sigpending();
3063 spin_unlock_irq(&current->sighand->siglock);
3065 EXPORT_SYMBOL(kernel_sigaction);
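/*
 * Example (illustrative, not part of this file): allow_signal() and
 * disallow_signal() in <linux/signal.h> are implemented on top of
 * kernel_sigaction(). A kthread that wants to be killable might look
 * like:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGKILL);
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current))
 *				break;
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */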
3067 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3069 struct task_struct *p = current, *t;
3070 struct k_sigaction *k;
3073 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3076 k = &p->sighand->action[sig-1];
3078 spin_lock_irq(&p->sighand->siglock);
3083 sigdelsetmask(&act->sa.sa_mask,
3084 sigmask(SIGKILL) | sigmask(SIGSTOP));
3088 * "Setting a signal action to SIG_IGN for a signal that is
3089 * pending shall cause the pending signal to be discarded,
3090 * whether or not it is blocked."
3092 * "Setting a signal action to SIG_DFL for a signal that is
3093 * pending and whose default action is to ignore the signal
3094 * (for example, SIGCHLD), shall cause the pending signal to
3095 * be discarded, whether or not it is blocked"
3097 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3099 sigaddset(&mask, sig);
3100 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3101 for_each_thread(p, t)
3102 flush_sigqueue_mask(&mask, &t->pending);
3106 spin_unlock_irq(&p->sighand->siglock);
3111 do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3116 oss.ss_sp = (void __user *) current->sas_ss_sp;
3117 oss.ss_size = current->sas_ss_size;
3118 oss.ss_flags = sas_ss_flags(sp);
3126 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3128 error = __get_user(ss_sp, &uss->ss_sp) |
3129 __get_user(ss_flags, &uss->ss_flags) |
3130 __get_user(ss_size, &uss->ss_size);
3135 if (on_sig_stack(sp))
3140 * Note - this code used to test ss_flags incorrectly:
3141 * old code may have been written using ss_flags==0
3142 * to mean ss_flags==SS_ONSTACK (as this was the only
3143 * way that worked) - this fix preserves that older
3144 * mechanism.
3146 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3149 if (ss_flags == SS_DISABLE) {
3154 if (ss_size < MINSIGSTKSZ)
3158 current->sas_ss_sp = (unsigned long) ss_sp;
3159 current->sas_ss_size = ss_size;
3165 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3167 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3168 __put_user(oss.ss_size, &uoss->ss_size) |
3169 __put_user(oss.ss_flags, &uoss->ss_flags);
3175 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3177 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
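/*
 * Example (illustrative, not part of this file): a handler runs on the
 * alternate stack only if it was installed with SA_ONSTACK after
 * sigaltstack() registered the stack. A common SIGSEGV setup:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * segv_handler is an assumed function; without the alternate stack a
 * stack-overflow SIGSEGV could not run any handler at all.
 */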
3180 int restore_altstack(const stack_t __user *uss)
3182 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3184 /* squash all but -EFAULT for now */
3184 return err == -EFAULT ? err : 0;
3187 int __save_altstack(stack_t __user *uss, unsigned long sp)
3189 struct task_struct *t = current;
3190 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3191 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3192 __put_user(t->sas_ss_size, &uss->ss_size);
3195 #ifdef CONFIG_COMPAT
3196 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3197 const compat_stack_t __user *, uss_ptr,
3198 compat_stack_t __user *, uoss_ptr)
3205 compat_stack_t uss32;
3207 memset(&uss, 0, sizeof(stack_t));
3208 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3210 uss.ss_sp = compat_ptr(uss32.ss_sp);
3211 uss.ss_flags = uss32.ss_flags;
3212 uss.ss_size = uss32.ss_size;
3216 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3217 (stack_t __force __user *) &uoss,
3218 compat_user_stack_pointer());
3220 if (ret >= 0 && uoss_ptr) {
3221 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3222 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3223 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3224 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3230 int compat_restore_altstack(const compat_stack_t __user *uss)
3232 int err = compat_sys_sigaltstack(uss, NULL);
3233 /* squash all but -EFAULT for now */
3234 return err == -EFAULT ? err : 0;
3237 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3239 struct task_struct *t = current;
3240 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3241 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3242 __put_user(t->sas_ss_size, &uss->ss_size);
3246 #ifdef __ARCH_WANT_SYS_SIGPENDING
3249 * sys_sigpending - examine pending signals
3250 * @set: where the mask of pending signals is returned
3252 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3254 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3259 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3261 * sys_sigprocmask - examine and change blocked signals
3262 * @how: whether to add, remove, or set signals
3263 * @nset: signals to add or remove (if non-null)
3264 * @oset: previous value of signal mask if non-null
3266 * Some platforms have their own version with special arguments;
3267 * others support only sys_rt_sigprocmask.
3270 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3271 old_sigset_t __user *, oset)
3273 old_sigset_t old_set, new_set;
3274 sigset_t new_blocked;
3276 old_set = current->blocked.sig[0];
3279 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3282 new_blocked = current->blocked;
3286 sigaddsetmask(&new_blocked, new_set);
3289 sigdelsetmask(&new_blocked, new_set);
3292 new_blocked.sig[0] = new_set;
3298 set_current_blocked(&new_blocked);
3302 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3308 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3310 #ifndef CONFIG_ODD_RT_SIGACTION
3312 * sys_rt_sigaction - alter an action taken by a process
3313 * @sig: signal to be sent
3314 * @act: new sigaction
3315 * @oact: used to save the previous sigaction
3316 * @sigsetsize: size of sigset_t type
3318 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3319 const struct sigaction __user *, act,
3320 struct sigaction __user *, oact,
3323 struct k_sigaction new_sa, old_sa;
3326 /* XXX: Don't preclude handling different sized sigset_t's. */
3327 if (sigsetsize != sizeof(sigset_t))
3331 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3335 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3338 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
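/*
 * Example (illustrative, not part of this file): userspace reaches this
 * via sigaction(2). A minimal handler installation:
 *
 *	static volatile sig_atomic_t got_int;
 *
 *	static void on_int(int sig)
 *	{
 *		got_int = 1;
 *	}
 *
 *	struct sigaction sa = { .sa_handler = on_int };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGINT, &sa, NULL);
 *
 * Attempts to set an action for SIGKILL/SIGSTOP fail in do_sigaction()
 * above via the sig_kernel_only() check.
 */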
3344 #ifdef CONFIG_COMPAT
3345 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3346 const struct compat_sigaction __user *, act,
3347 struct compat_sigaction __user *, oact,
3348 compat_size_t, sigsetsize)
3350 struct k_sigaction new_ka, old_ka;
3351 compat_sigset_t mask;
3352 #ifdef __ARCH_HAS_SA_RESTORER
3353 compat_uptr_t restorer;
3357 /* XXX: Don't preclude handling different sized sigset_t's. */
3358 if (sigsetsize != sizeof(compat_sigset_t))
3362 compat_uptr_t handler;
3363 ret = get_user(handler, &act->sa_handler);
3364 new_ka.sa.sa_handler = compat_ptr(handler);
3365 #ifdef __ARCH_HAS_SA_RESTORER
3366 ret |= get_user(restorer, &act->sa_restorer);
3367 new_ka.sa.sa_restorer = compat_ptr(restorer);
3369 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3370 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3373 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3376 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3378 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3379 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3381 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3382 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3383 #ifdef __ARCH_HAS_SA_RESTORER
3384 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3385 &oact->sa_restorer);
3391 #endif /* !CONFIG_ODD_RT_SIGACTION */
3393 #ifdef CONFIG_OLD_SIGACTION
3394 SYSCALL_DEFINE3(sigaction, int, sig,
3395 const struct old_sigaction __user *, act,
3396 struct old_sigaction __user *, oact)
3398 struct k_sigaction new_ka, old_ka;
3403 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3404 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3405 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3406 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3407 __get_user(mask, &act->sa_mask))
3409 #ifdef __ARCH_HAS_KA_RESTORER
3410 new_ka.ka_restorer = NULL;
3412 siginitset(&new_ka.sa.sa_mask, mask);
3415 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3418 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3419 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3420 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3421 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3422 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3429 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3430 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3431 const struct compat_old_sigaction __user *, act,
3432 struct compat_old_sigaction __user *, oact)
3434 struct k_sigaction new_ka, old_ka;
3436 compat_old_sigset_t mask;
3437 compat_uptr_t handler, restorer;
3440 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3441 __get_user(handler, &act->sa_handler) ||
3442 __get_user(restorer, &act->sa_restorer) ||
3443 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3444 __get_user(mask, &act->sa_mask))
3447 #ifdef __ARCH_HAS_KA_RESTORER
3448 new_ka.ka_restorer = NULL;
3450 new_ka.sa.sa_handler = compat_ptr(handler);
3451 new_ka.sa.sa_restorer = compat_ptr(restorer);
3452 siginitset(&new_ka.sa.sa_mask, mask);
3455 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3458 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3459 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3460 &oact->sa_handler) ||
3461 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3462 &oact->sa_restorer) ||
3463 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3464 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3471 #ifdef CONFIG_SGETMASK_SYSCALL
3474 * For backwards compatibility. Functionality superseded by sigprocmask.
3476 SYSCALL_DEFINE0(sgetmask)
3479 return current->blocked.sig[0];
3482 SYSCALL_DEFINE1(ssetmask, int, newmask)
3484 int old = current->blocked.sig[0];
3487 siginitset(&newset, newmask);
3488 set_current_blocked(&newset);
3492 #endif /* CONFIG_SGETMASK_SYSCALL */
3494 #ifdef __ARCH_WANT_SYS_SIGNAL
3496 * For backwards compatibility. Functionality superseded by sigaction.
3498 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3500 struct k_sigaction new_sa, old_sa;
3503 new_sa.sa.sa_handler = handler;
3504 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3505 sigemptyset(&new_sa.sa.sa_mask);
3507 ret = do_sigaction(sig, &new_sa, &old_sa);
3509 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
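/*
 * Example (illustrative, not part of this file): SA_ONESHOT|SA_NOMASK
 * gives this syscall the historical System V semantics: the disposition
 * is reset to SIG_DFL before the handler runs, so a persistent handler
 * must re-arm itself:
 *
 *	static void on_usr1(int sig)
 *	{
 *		signal(SIGUSR1, on_usr1);
 *		...
 *	}
 *
 * A second SIGUSR1 arriving before the re-install gets the default
 * action; that race is why sigaction(2) is preferred in new code, and
 * why modern libc signal() wrappers are built on sigaction() instead.
 */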
3511 #endif /* __ARCH_WANT_SYS_SIGNAL */
3513 #ifdef __ARCH_WANT_SYS_PAUSE
3515 SYSCALL_DEFINE0(pause)
3517 while (!signal_pending(current)) {
3518 __set_current_state(TASK_INTERRUPTIBLE);
3519 schedule();
3521 return -ERESTARTNOHAND;
3526 static int sigsuspend(sigset_t *set)
3528 current->saved_sigmask = current->blocked;
3529 set_current_blocked(set);
3531 while (!signal_pending(current)) {
3532 __set_current_state(TASK_INTERRUPTIBLE);
3533 schedule();
3535 set_restore_sigmask();
3536 return -ERESTARTNOHAND;
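/*
 * Example (illustrative, not part of this file): the point of
 * sigsuspend(2) is that replacing the mask and sleeping happen
 * atomically, closing the classic lost-wakeup race:
 *
 *	static volatile sig_atomic_t child_done;
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_done)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * child_done is assumed to be set by a SIGCHLD handler; testing the
 * flag with SIGCHLD blocked and sleeping with it unblocked in one step
 * is what a sigprocmask()+pause() pair cannot do.
 */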
3540 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3541 * value until a signal is received
3542 * @unewset: new signal mask value
3543 * @sigsetsize: size of sigset_t type
3545 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3549 /* XXX: Don't preclude handling different sized sigset_t's. */
3550 if (sigsetsize != sizeof(sigset_t))
3553 if (copy_from_user(&newset, unewset, sizeof(newset)))
3555 return sigsuspend(&newset);
3558 #ifdef CONFIG_COMPAT
3559 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3563 compat_sigset_t newset32;
3565 /* XXX: Don't preclude handling different sized sigset_t's. */
3566 if (sigsetsize != sizeof(sigset_t))
3569 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3571 sigset_from_compat(&newset, &newset32);
3572 return sigsuspend(&newset);
3574 /* on little-endian bitmaps don't care about granularity */
3575 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3580 #ifdef CONFIG_OLD_SIGSUSPEND
3581 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3584 siginitset(&blocked, mask);
3585 return sigsuspend(&blocked);
3588 #ifdef CONFIG_OLD_SIGSUSPEND3
3589 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3592 siginitset(&blocked, mask);
3593 return sigsuspend(&blocked);
3597 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3602 void __init signals_init(void)
3604 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3607 #ifdef CONFIG_KGDB_KDB
3608 #include <linux/kdb.h>
3610 * kdb_send_sig_info - Allows kdb to send signals without exposing
3611 * signal internals. This function checks if the required locks are
3612 * available before calling the main signal code, to avoid kdb
3613 * deadlocks.
3616 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3618 static struct task_struct *kdb_prev_t;
3620 if (!spin_trylock(&t->sighand->siglock)) {
3621 kdb_printf("Can't do kill command now.\n"
3622 "The sigmask lock is held somewhere else in "
3623 "kernel, try again later\n");
3626 spin_unlock(&t->sighand->siglock);
3627 new_t = kdb_prev_t != t;
3629 if (t->state != TASK_RUNNING && new_t) {
3630 kdb_printf("Process is not RUNNING, sending a signal from "
3631 "kdb risks deadlock\n"
3632 "on the run queue locks. "
3633 "The signal has _not_ been sent.\n"
3634 "Reissue the kill command if you want to risk "
3638 sig = info->si_signo;
3639 if (send_sig_info(sig, info, t))
3640 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3643 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3645 #endif /* CONFIG_KGDB_KDB */