/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        u64 nsecs = rlim_new * NSEC_PER_SEC;

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}
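/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is ours): lowering RLIMIT_CPU via setrlimit() is the path that ends
 * up in update_rlimit_cpu() above. The process then gets SIGXCPU at the
 * soft limit and SIGKILL at the hard limit.
 */
#include <stdio.h>
#include <sys/resource.h>

static int limit_cpu_seconds(rlim_t soft, rlim_t hard)
{
        struct rlimit rl = { .rlim_cur = soft, .rlim_max = hard };

        /* The kernel converts the soft limit to nanoseconds and arms
         * the process-wide CPUCLOCK_PROF expiration cache. */
        if (setrlimit(RLIMIT_CPU, &rl) != 0) {
                perror("setrlimit");
                return -1;
        }
        return 0;
}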
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}
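/*
 * For reference, the clockid encoding that the CPUCLOCK_* helpers used
 * above decode. This is a sketch of the definitions from
 * include/linux/posix-timers.h of the same era, reproduced here from
 * memory for illustration only: the PID lives in the upper bits
 * (bitwise inverted), bit 2 selects per-thread vs. per-process, and
 * bits 0-1 pick one of the three clock types.
 */
#define CPUCLOCK_PID(clock)             ((pid_t) ~((clock) >> 3))
#define CPUCLOCK_PERTHREAD(clock) \
        (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)

#define CPUCLOCK_PERTHREAD_MASK 4
#define CPUCLOCK_WHICH(clock)   ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
#define CPUCLOCK_CLOCK_MASK     3
#define CPUCLOCK_PROF           0
#define CPUCLOCK_VIRT           1
#define CPUCLOCK_SCHED          2
#define CPUCLOCK_MAX            3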
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
        int i;
        u64 delta, incr;

        if (timer->it.cpu.incr == 0)
                return;

        if (now < timer->it.cpu.expires)
                return;

        incr = timer->it.cpu.incr;
        delta = now + incr - timer->it.cpu.expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;

                timer->it.cpu.expires += incr;
                timer->it_overrun += 1 << i;
                delta -= incr;
        }
}
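/*
 * A standalone sketch of the same doubling/halving walk on plain
 * integers (names are ours), runnable in userspace: it advances the
 * expiry past "now" in O(log n) steps instead of one division, which
 * matters because the kernel variant must avoid 64-bit overflow.
 */
#include <stdint.h>
#include <stdio.h>

static void bump(uint64_t *expires, uint64_t incr, uint64_t now,
                 uint64_t *overrun)
{
        int i;
        uint64_t delta;

        if (incr == 0 || now < *expires)
                return;

        delta = now + incr - *expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;
                *expires += incr;
                *overrun += 1ULL << i;
                delta -= incr;
        }
}

int main(void)
{
        uint64_t expires = 100, overrun = 0;

        /* 900 units past expiry with period 30: 31 reloads are needed
         * to push the expiry strictly past "now". */
        bump(&expires, 30, 1000, &overrun);
        printf("expires=%llu overrun=%llu\n",     /* expires=1030 overrun=31 */
               (unsigned long long)expires, (unsigned long long)overrun);
        return 0;
}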
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                return 1;
        return 0;
}
static inline u64 prof_ticks(struct task_struct *p)
{
        u64 utime, stime;

        task_cputime(p, &utime, &stime);

        return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
        u64 utime, stime;

        task_cputime(p, &utime, &stime);

        return utime;
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
                            struct task_struct *p, u64 *sample)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                *sample = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                *sample = task_sched_runtime(p);
                break;
        }
        return 0;
}
/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
        u64 curr_cputime;
retry:
        curr_cputime = atomic64_read(cputime);
        if (sum_cputime > curr_cputime) {
                if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
                        goto retry;
        }
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
        __update_gt_cputime(&cputime_atomic->utime, sum->utime);
        __update_gt_cputime(&cputime_atomic->stime, sum->stime);
        __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
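/*
 * The same lock-free "monotonic maximum" pattern, sketched with C11
 * atomics for userspace (names are ours): retry the compare-exchange
 * until either our value is installed or another writer has already
 * pushed the counter past it, so the value never goes backwards.
 */
#include <stdatomic.h>
#include <stdint.h>

static void update_gt(_Atomic uint64_t *cputime, uint64_t sum_cputime)
{
        uint64_t curr = atomic_load(cputime);

        while (sum_cputime > curr) {
                /* On failure, curr is reloaded with the value that beat us. */
                if (atomic_compare_exchange_weak(cputime, &curr, sum_cputime))
                        break;
        }
}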
/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
                                         struct task_cputime_atomic *atomic_times)
{
        times->utime = atomic64_read(&atomic_times->utime);
        times->stime = atomic64_read(&atomic_times->stime);
        times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running)) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime_atomic, &sum);

                /*
                 * We're setting cputimer->running without a lock. Ensure
                 * this only gets written to in one operation. We set
                 * running after update_gt_cputime() as a small optimization,
                 * but barriers are not required because update_gt_cputime()
                 * can handle concurrent updates.
                 */
                WRITE_ONCE(cputimer->running, true);
        }
        sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  u64 *sample)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                *sample = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                *sample = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}
static int posix_cpu_clock_get_task(struct task_struct *tsk,
                                    const clockid_t which_clock,
                                    struct timespec64 *tp)
{
        int err = -EINVAL;
        u64 rtn;

        if (CPUCLOCK_PERTHREAD(which_clock)) {
                if (same_thread_group(tsk, current))
                        err = cpu_clock_sample(which_clock, tsk, &rtn);
        } else {
                if (tsk == current || thread_group_leader(tsk))
                        err = cpu_clock_sample_group(which_clock, tsk, &rtn);
        }

        if (!err)
                *tp = ns_to_timespec64(rtn);

        return err;
}
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int err = -EINVAL;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                err = posix_cpu_clock_get_task(current, which_clock, tp);
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p)
                        err = posix_cpu_clock_get_task(p, which_clock, tp);
                rcu_read_unlock();
        }

        return err;
}
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        int ret = 0;
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Protect against sighand release/switch in exit/exec and process/
         * thread timer list entry concurrent read/writes.
         */
        sighand = lock_task_sighand(p, &flags);
        if (unlikely(sighand == NULL)) {
                /*
                 * We raced with the reaping of the task.
                 * The deletion should have cleared us off the list.
                 */
                WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
        } else {
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
                        list_del(&timer->it.cpu.entry);

                unlock_task_sighand(p, &flags);
        }

        if (!ret)
                put_task_struct(p);

        return ret;
}
static void cleanup_timers_list(struct list_head *head)
{
        struct cpu_timer_list *timer, *next;

        list_for_each_entry_safe(timer, next, head, entry)
                list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
        cleanup_timers_list(head);
        cleanup_timers_list(++head);
        cleanup_timers_list(++head);
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers);
}
static inline int expires_gt(u64 expires, u64 new_exp)
{
        return expires == 0 || expires > new_exp;
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (nt->expires < next->expires)
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                u64 exp = nt->expires;

                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
                 * need to update expiration cache. Take into account that
                 * for process timers we share expiration cache with itimers
                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
                 */
                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, exp))
                                cputime_expires->prof_exp = exp;
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, exp))
                                cputime_expires->virt_exp = exp;
                        break;
                case CPUCLOCK_SCHED:
                        if (expires_gt(cputime_expires->sched_exp, exp))
                                cputime_expires->sched_exp = exp;
                        break;
                }
                if (CPUCLOCK_PERTHREAD(timer->it_clock))
                        tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
                else
                        tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
        }
}
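/*
 * A minimal standalone sketch (types and names are ours) of the
 * ordering policy arm_timer() implements with list_for_each_entry()
 * and list_add(): walk the expiry-sorted list, insert after the last
 * entry that expires no later than the new one, and report whether the
 * new entry became the head, i.e. the new earliest expiry that must be
 * propagated into the expiration cache.
 */
#include <stdint.h>

struct toy_timer {
        uint64_t expires;
        struct toy_timer *next;
};

/* Returns nonzero if *headp now points at the new timer. */
static int toy_arm(struct toy_timer **headp, struct toy_timer *nt)
{
        struct toy_timer **pos = headp;

        while (*pos && (*pos)->expires <= nt->expires)
                pos = &(*pos)->next;

        nt->next = *pos;
        *pos = nt;
        return pos == headp;
}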
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires = 0;
        } else if (timer->it.cpu.incr == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p, u64 *sample)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                *sample = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                               struct itimerspec64 *new, struct itimerspec64 *old)
{
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
        u64 old_expires, new_expires, old_incr, val;
        int ret;

        WARN_ON_ONCE(p == NULL);

        new_expires = timespec64_to_ns(&new->it_value);

        /*
         * Protect against sighand release/switch in exit/exec and p->cpu_timers
         * and p->signal->cpu_timers read/write in arm_timer()
         */
        sighand = lock_task_sighand(p, &flags);
        /*
         * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(sighand == NULL)) {
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        WARN_ON_ONCE(!irqs_disabled());

        ret = 0;
        old_incr = timer->it.cpu.incr;
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (val < timer->it.cpu.expires) {
                                old_expires = timer->it.cpu.expires - val;
                                old->it_value = ns_to_timespec64(old_expires);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                unlock_task_sighand(p, &flags);
                goto out;
        }

        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
                new_expires += val;
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires != 0 && val < new_expires) {
                arm_timer(timer);
        }

        unlock_task_sighand(p, &flags);
        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires != 0 && !(val < new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
out:
        if (old)
                old->it_interval = ns_to_timespec64(old_incr);

        return ret;
}
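/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is ours): a timer_settime() call like the one below is what the
 * posix_cpu_timer_set() path above services, arming a periodic timer
 * against the process CPU-time clock. A real program would install a
 * SIGALRM handler first; link with -lrt on older glibc.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

static timer_t create_cpu_timer(void)
{
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGALRM,
        };
        struct itimerspec its = {
                .it_value    = { .tv_sec = 1, .tv_nsec = 0 },
                .it_interval = { .tv_sec = 1, .tv_nsec = 0 },
        };
        timer_t tid;

        if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) != 0 ||
            timer_settime(tid, 0, &its, NULL) != 0) {
                perror("cpu timer");
                return (timer_t)0;
        }
        /* Fires after 1s of consumed CPU time, then every further 1s. */
        return tid;
}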
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
        u64 now;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Easy part: convert the reload time.
         */
        itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

        if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
        } else {
                struct sighand_struct *sighand;
                unsigned long flags;

                /*
                 * Protect against sighand release/switch in exit/exec and
                 * also make timer sampling safe if it ends up calling
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        timer->it.cpu.expires = 0;
                        itp->it_value = ns_to_timespec64(timer->it.cpu.expires);
                        return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
                }
        }

        if (now < timer->it.cpu.expires) {
                itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}
static unsigned long long
check_timers_list(struct list_head *timers,
                  struct list_head *firing,
                  unsigned long long curr)
{
        int maxfire = 20;

        while (!list_empty(timers)) {
                struct cpu_timer_list *t;

                t = list_first_entry(timers, struct cpu_timer_list, entry);

                if (!--maxfire || curr < t->expires)
                        return t->expires;

                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        return 0;
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        struct task_cputime *tsk_expires = &tsk->cputime_expires;
        u64 expires;
        unsigned long soft;

        /*
         * If cputime_expires is zero, then there are no active
         * per thread CPU timers.
         */
        if (task_cputime_zero(&tsk->cputime_expires))
                return;

        expires = check_timers_list(timers, firing, prof_ticks(tsk));
        tsk_expires->prof_exp = expires;

        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
        tsk_expires->virt_exp = expires;

        tsk_expires->sched_exp = check_timers_list(++timers, firing,
                                                   tsk->se.sum_exec_runtime);

        /*
         * Check for the special case thread timers.
         */
        soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
        if (task_cputime_zero(tsk_expires))
                tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
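/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is ours): RLIMIT_RTTIME, the special-case limit checked above, is
 * expressed in microseconds of CPU time a realtime task may consume
 * without blocking. Setting it usually requires realtime scheduling
 * privileges (CAP_SYS_NICE or an appropriate rtprio rlimit).
 */
#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

static int cap_rt_runtime(void)
{
        struct sched_param sp = { .sched_priority = 10 };
        /* Soft limit 500ms: SIGXCPU once per second past it.
         * Hard limit 1s: SIGKILL. */
        struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };

        if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0 ||
            setrlimit(RLIMIT_RTTIME, &rl) != 0) {
                perror("rt setup");
                return -1;
        }
        return 0;
}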
static inline void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;

        /* Turn off cputimer->running. This is done without locking. */
        WRITE_ONCE(cputimer->running, false);
        tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             u64 *expires, u64 cur_time, int signo)
{
        if (!it->expires)
                return;

        if (cur_time >= it->expires) {
                if (it->incr)
                        it->expires += it->incr;
                else
                        it->expires = 0;

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (it->expires && (!*expires || it->expires < *expires))
                *expires = it->expires;
}
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] list onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        u64 utime, ptime, virt_expires, prof_expires;
        u64 sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        /*
         * If cputimer is not running, then there are no active
         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
         */
        if (!READ_ONCE(tsk->signal->cputimer.running))
                return;

        /*
         * Signify that a thread is checking for process timers.
         * Write access to this field is protected by the sighand lock.
         */
        sig->cputimer.checking_timer = true;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = utime + cputime.stime;
        sum_sched_runtime = cputime.sum_exec_runtime;

        prof_expires = check_timers_list(timers, firing, ptime);
        virt_expires = check_timers_list(++timers, firing, utime);
        sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                u64 x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = soft * NSEC_PER_SEC;
                if (!prof_expires || x < prof_expires)
                        prof_expires = x;
        }

        sig->cputime_expires.prof_exp = prof_expires;
        sig->cputime_expires.virt_exp = virt_expires;
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);

        sig->cputimer.checking_timer = false;
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct sighand_struct *sighand;
        unsigned long flags;
        struct task_struct *p = timer->it.cpu.task;
        u64 now;

        WARN_ON_ONCE(p == NULL);

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state))
                        goto out;

                /* Protect timer list r/w in arm_timer() */
                sighand = lock_task_sighand(p, &flags);
                if (!sighand)
                        goto out;
        } else {
                /*
                 * Protect arm_timer() and timer sampling in case of call to
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        timer->it.cpu.expires = 0;
                        goto out;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        unlock_task_sighand(p, &flags);
                        /* Optimizations: if the process is dying, no need to rearm */
                        goto out;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the sighand locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        WARN_ON_ONCE(!irqs_disabled());
        arm_timer(timer);
        unlock_task_sighand(p, &flags);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;
        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample;

                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
                task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        /*
         * Check if thread group timers expired when the cputimer is
         * running and no other thread in the group is already checking
         * for thread group cputimers. These fields are read without the
         * sighand lock. However, this is fine because this is meant to
         * be a fastpath heuristic to determine whether we should try to
         * acquire the sighand lock to check/handle timers.
         *
         * In the worst case scenario, if 'running' or 'checking_timer' gets
         * set but the current thread doesn't see the change yet, we'll wait
         * until the next thread in the group gets a scheduler interrupt to
         * handle the timer. This isn't an issue in practice because these
         * types of delays with signals actually getting sent are expected.
         */
        if (READ_ONCE(sig->cputimer.running) &&
            !READ_ONCE(sig->cputimer.checking_timer)) {
                struct task_cputime group_sample;

                sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        WARN_ON_ONCE(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);

        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           u64 *newval, u64 *oldval)
{
        u64 now;

        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                /*
                 * We are setting itimer. The *oldval is absolute and we update
                 * it to be relative, *newval argument is relative and we update
                 * it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
                                *oldval = TICK_NSEC;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        return;
                *newval += now;
        }

        /*
         * Update the expiration cache if we are the earliest timer, or if
         * the RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }

        tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
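/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is ours): setitimer(ITIMER_PROF, ...) is serviced by
 * set_process_cpu_timer() above with clock_idx == CPUCLOCK_PROF,
 * *newval carrying the relative expiry and *oldval receiving the time
 * that was previously remaining.
 */
#include <stdio.h>
#include <sys/time.h>

static int arm_prof_itimer(void)
{
        struct itimerval new = {
                .it_value    = { .tv_sec = 2, .tv_usec = 0 },
                .it_interval = { .tv_sec = 0, .tv_usec = 100000 },
        };
        struct itimerval old;

        /* SIGPROF after 2s of CPU time, then every further 100ms. */
        if (setitimer(ITIMER_PROF, &new, &old) != 0) {
                perror("setitimer");
                return -1;
        }
        return 0;
}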
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec64 *rqtp, struct itimerspec64 *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec64 zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                *rqtp = ns_to_timespec64(timer.it.cpu.expires);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
                if (!error) {
                        /*
                         * Timer is now unarmed, deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case where the timer was or
                         * is in the middle of firing. In other cases we
                         * already freed the resources.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
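/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is ours): clock_nanosleep() on a CPU-time clock funnels into
 * do_cpu_nanosleep() above. The call returns only after the process has
 * consumed the requested amount of CPU time, not after wall-clock time;
 * in a single-threaded process the clock does not advance while the
 * caller sleeps, so this is mainly useful when other threads keep
 * burning CPU.
 */
#include <stdio.h>
#include <time.h>

static int sleep_for_cpu_time(time_t secs)
{
        struct timespec rq = { .tv_sec = secs, .tv_nsec = 0 };
        int err;

        /* Relative sleep; rq is CPU time still to be consumed. */
        err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &rq, NULL);
        if (err) {
                /* clock_nanosleep() returns the error number directly. */
                fprintf(stderr, "clock_nanosleep: %d\n", err);
                return -1;
        }
        return 0;
}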
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            struct timespec64 *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block = &current->restart_block;
        struct itimerspec64 it;
        struct timespec ts;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                ts = timespec64_to_timespec(it.it_value);
                if (rmtp && copy_to_user(rmtp, &ts, sizeof(*rmtp)))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
                restart_block->nanosleep.rmtp = rmtp;
                restart_block->nanosleep.expires = timespec64_to_ns(rqtp);
        }
        return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct itimerspec64 it;
        struct timespec64 t;
        struct timespec tmp;
        int error;

        t = ns_to_timespec64(restart_block->nanosleep.expires);

        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
                 * Report back to the user the time still remaining.
                 */
                tmp = timespec64_to_timespec(it.it_value);
                if (rmtp && copy_to_user(rmtp, &tmp, sizeof(*rmtp)))
                        return -EFAULT;

                restart_block->nanosleep.expires = timespec64_to_ns(&t);
        }
        return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec64 *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec64 *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec64 *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec64 *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec64 *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
struct k_clock clock_posix_cpu = {
        .clock_getres   = posix_cpu_clock_getres,
        .clock_set      = posix_cpu_clock_set,
        .clock_get      = posix_cpu_clock_get,
        .timer_create   = posix_cpu_timer_create,
        .nsleep         = posix_cpu_nsleep,
        .nsleep_restart = posix_cpu_nsleep_restart,
        .timer_set      = posix_cpu_timer_set,
        .timer_del      = posix_cpu_timer_del,
        .timer_get      = posix_cpu_timer_get,
};
static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres   = process_cpu_clock_getres,
                .clock_get      = process_cpu_clock_get,
                .timer_create   = process_cpu_timer_create,
                .nsleep         = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres   = thread_cpu_clock_getres,
                .clock_get      = thread_cpu_clock_get,
                .timer_create   = thread_cpu_timer_create,
        };

        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);
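/*
 * Illustrative userspace sketch (not part of this file): the two
 * clockids registered above are exactly what a clock_gettime() call
 * like this one reads; both report consumed CPU time rather than wall
 * time.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec proc, thrd;

        if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &proc) != 0 ||
            clock_gettime(CLOCK_THREAD_CPUTIME_ID, &thrd) != 0)
                return 1;

        printf("process CPU: %ld.%09lds\n", (long)proc.tv_sec, proc.tv_nsec);
        printf("thread  CPU: %ld.%09lds\n", (long)thrd.tv_sec, thrd.tv_nsec);
        return 0;
}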