/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run the CPU timer and update the
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        cputime_t cputime = secs_to_cputime(rlim_new);

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}

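/*
 * Example (userspace sketch, illustrative only -- not part of this
 * file): update_rlimit_cpu() is the kernel-side hook behind a plain
 * setrlimit() call, so the sketch below ends up installing a
 * process-wide CPUCLOCK_PROF expiry via set_process_cpu_timer():
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * After 10 seconds of accumulated CPU time the process gets SIGXCPU,
 * and SIGKILL at the 20 second hard limit.
 */
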
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}

static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        unsigned long long ret;

        ret = 0;                /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret = cputime_to_expires(timespec_to_cputime(tp));
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               unsigned long long expires,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(expires);
        else
                cputime_to_timespec((__force cputime_t)expires, tp);
}

/*
 * Update the expiry time from the increment, and increase the overrun
 * count, given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           unsigned long long now)
{
        int i;
        unsigned long long delta, incr;

        if (timer->it.cpu.incr == 0)
                return;

        if (now < timer->it.cpu.expires)
                return;

        incr = timer->it.cpu.incr;
        delta = now + incr - timer->it.cpu.expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;

                timer->it.cpu.expires += incr;
                timer->it_overrun += 1 << i;
                delta -= incr;
        }
}

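/*
 * Worked example of the two loops above (illustrative): suppose
 * incr = 4 and the clock has run 13 units past the expiry time, so
 * delta = 13 + 4 = 17. The first loop doubles incr up to 16 (i = 2).
 * The second loop then walks back down: it subtracts 16 from delta
 * (adding 1 << 2 = 4 to it_overrun and advancing expires by 16), and
 * skips 8 and 4 since the remaining delta is 1. The timer ends up
 * advanced by four whole periods, the minimum needed to put expires
 * ahead of now, in O(log(delta/incr)) steps instead of one at a time.
 */
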
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                return 1;
        return 0;
}

static inline unsigned long long prof_ticks(struct task_struct *p)
{
        cputime_t utime, stime;

        task_cputime(p, &utime, &stime);

        return cputime_to_expires(utime + stime);
}

static inline unsigned long long virt_ticks(struct task_struct *p)
{
        cputime_t utime, stime;

        task_cputime(p, &utime, &stime);

        return cputime_to_expires(utime);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0)
                error = -EPERM;
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            unsigned long long *sample)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                *sample = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                *sample = task_sched_runtime(p);
                break;
        }
        return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
        u64 curr_cputime;
retry:
        curr_cputime = atomic64_read(cputime);
        if (sum_cputime > curr_cputime) {
                if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
                        goto retry;
        }
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
        __update_gt_cputime(&cputime_atomic->utime, sum->utime);
        __update_gt_cputime(&cputime_atomic->stime, sum->stime);
        __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

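/*
 * The pair above is a lock-free "monotonic max": an updater that loses
 * the cmpxchg race simply retries, so the stored value can only move
 * forward. A minimal userspace analogue using C11 atomics (illustrative
 * sketch, not kernel code):
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	static void update_gt(_Atomic uint64_t *v, uint64_t new)
 *	{
 *		uint64_t cur = atomic_load(v);
 *
 *		while (new > cur &&
 *		       !atomic_compare_exchange_weak(v, &cur, new))
 *			;	a failed CAS reloads cur, then we retest
 *	}
 */
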
/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
                                         struct task_cputime_atomic *atomic_times)
{
        times->utime = atomic64_read(&atomic_times->utime);
        times->stime = atomic64_read(&atomic_times->stime);
        times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running)) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime_atomic, &sum);

                /*
                 * We're setting cputimer->running without a lock. Ensure
                 * this only gets written to in one operation. We set
                 * running after update_gt_cputime() as a small optimization,
                 * but barriers are not required because update_gt_cputime()
                 * can handle concurrent updates.
                 */
                WRITE_ONCE(cputimer->running, true);
        }
        sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with the task sighand lock held for safe
 * while_each_thread() traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  unsigned long long *sample)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                *sample = cputime_to_expires(cputime.utime + cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
                                    const clockid_t which_clock,
                                    struct timespec *tp)
{
        int err = -EINVAL;
        unsigned long long rtn;

        if (CPUCLOCK_PERTHREAD(which_clock)) {
                if (same_thread_group(tsk, current))
                        err = cpu_clock_sample(which_clock, tsk, &rtn);
        } else {
                if (tsk == current || thread_group_leader(tsk))
                        err = cpu_clock_sample_group(which_clock, tsk, &rtn);
        }

        if (!err)
                sample_to_timespec(which_clock, rtn, tp);

        return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int err = -EINVAL;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                err = posix_cpu_clock_get_task(current, which_clock, tp);
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p)
                        err = posix_cpu_clock_get_task(p, which_clock, tp);
                rcu_read_unlock();
        }

        return err;
}

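/*
 * Userspace sketch of the lookup above (illustrative only): pid 0 in
 * the clockid means "my own clock"; for another process, POSIX provides
 * clock_getcpuclockid() to encode its pid into a clockid_t:
 *
 *	#include <time.h>
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (!clock_getcpuclockid(pid, &cid))
 *		clock_gettime(cid, &ts);
 *
 * With pid == 0 this reads the caller's own process clock, exactly the
 * "special case constant value" branch above.
 */
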
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        int ret = 0;
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Protect against sighand release/switch in exit/exec and process/
         * thread timer list entry concurrent read/writes.
         */
        sighand = lock_task_sighand(p, &flags);
        if (unlikely(sighand == NULL)) {
                /*
                 * We raced with the reaping of the task.
                 * The deletion should have cleared us off the list.
                 */
                WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
        } else {
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
                        list_del(&timer->it.cpu.entry);

                unlock_task_sighand(p, &flags);
        }

        if (!ret)
                put_task_struct(p);

        return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
        struct cpu_timer_list *timer, *next;

        list_for_each_entry_safe(timer, next, head, entry)
                list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
        cleanup_timers_list(head);
        cleanup_timers_list(++head);
        cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (nt->expires < next->expires)
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                unsigned long long exp = nt->expires;

                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
                 * need to update the expiration cache. Take into account
                 * that for process timers we share the expiration cache with
                 * itimers and RLIMIT_CPU, and for thread timers with
                 * RLIMIT_RTTIME.
                 */
                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
                                cputime_expires->prof_exp = expires_to_cputime(exp);
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
                                cputime_expires->virt_exp = expires_to_cputime(exp);
                        break;
                case CPUCLOCK_SCHED:
                        if (cputime_expires->sched_exp == 0 ||
                            cputime_expires->sched_exp > exp)
                                cputime_expires->sched_exp = exp;
                        break;
                }
                if (CPUCLOCK_PERTHREAD(timer->it_clock))
                        tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
                else
                        tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
        }
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires = 0;
        } else if (timer->it.cpu.incr == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

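/*
 * The SIGEV_NONE branch above corresponds to timers created like this
 * (userspace sketch, illustrative only):
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
 *	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
 *
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 *
 * No signal is ever queued; expiry is only observable through
 * timer_gettime() reporting no time remaining.
 */
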
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with the task sighand lock held for safe
 * while_each_thread() traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  unsigned long long *sample)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = cputime_to_expires(cputime.utime + cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                               struct itimerspec *new, struct itimerspec *old)
{
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long old_expires, new_expires, old_incr, val;
        int ret;

        WARN_ON_ONCE(p == NULL);

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        /*
         * Protect against sighand release/switch in exit/exec and p->cpu_timers
         * and p->signal->cpu_timers read/write in arm_timer()
         */
        sighand = lock_task_sighand(p, &flags);
        /*
         * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(sighand == NULL))
                return -ESRCH;

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        WARN_ON_ONCE(!irqs_disabled());

        ret = 0;
        old_incr = timer->it.cpu.incr;
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current time to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (val < timer->it.cpu.expires) {
                                old_expires = timer->it.cpu.expires - val;
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                unlock_task_sighand(p, &flags);
                goto out;
        }

        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME))
                new_expires += val;

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires != 0 && val < new_expires)
                arm_timer(timer);

        unlock_task_sighand(p, &flags);
        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires != 0 && !(val < new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old)
                sample_to_timespec(timer->it_clock,
                                   old_incr, &old->it_interval);

        return ret;
}

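/*
 * Userspace view of the function above (illustrative sketch): without
 * TIMER_ABSTIME the requested value is relative, so it is converted to
 * absolute by adding the current clock sample ("val" above) before the
 * timer is armed:
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 2 },
 *		.it_interval = { .tv_nsec = 500000000 },
 *	};
 *	timer_settime(tid, 0, &its, NULL);
 *
 * This fires after the target accumulates two more seconds of CPU time,
 * then again for every additional half second of CPU time.
 */
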
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        unsigned long long now;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires == 0) {       /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
        } else {
                struct sighand_struct *sighand;
                unsigned long flags;

                /*
                 * Protect against sighand release/switch in exit/exec and
                 * also make timer sampling safe if it ends up calling
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        timer->it.cpu.expires = 0;
                        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                           &itp->it_value);
                        return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
                }
        }

        if (now < timer->it.cpu.expires) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.expires - now,
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

static unsigned long long
check_timers_list(struct list_head *timers,
                  struct list_head *firing,
                  unsigned long long curr)
{
        int maxfire = 20;

        while (!list_empty(timers)) {
                struct cpu_timer_list *t;

                t = list_first_entry(timers, struct cpu_timer_list, entry);

                if (!--maxfire || curr < t->expires)
                        return t->expires;

                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        return 0;
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        struct task_cputime *tsk_expires = &tsk->cputime_expires;
        unsigned long long expires;
        unsigned long soft;

        /*
         * If cputime_expires is zero, then there are no active
         * per thread CPU timers.
         */
        if (task_cputime_zero(&tsk->cputime_expires))
                return;

        expires = check_timers_list(timers, firing, prof_ticks(tsk));
        tsk_expires->prof_exp = expires_to_cputime(expires);

        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
        tsk_expires->virt_exp = expires_to_cputime(expires);

        tsk_expires->sched_exp = check_timers_list(++timers, firing,
                                                   tsk->se.sum_exec_runtime);

        /*
         * Check for the special case thread timers.
         */
        soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
        if (task_cputime_zero(tsk_expires))
                tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

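/*
 * The RLIMIT_RTTIME checks above implement the realtime watchdog. A
 * userspace sketch that arms it (illustrative only; values are in
 * microseconds):
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *
 * A SCHED_FIFO/SCHED_RR thread that runs half a second without
 * blocking then receives SIGXCPU once per second, and SIGKILL once
 * rt.timeout crosses the one second hard limit.
 */
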
static inline void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;

        /* Turn off cputimer->running. This is done without locking. */
        WRITE_ONCE(cputimer->running, false);
        tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             unsigned long long *expires,
                             unsigned long long cur_time, int signo)
{
        if (!it->expires)
                return;

        if (cur_time >= it->expires) {
                if (it->incr) {
                        it->expires += it->incr;
                        it->error += it->incr_error;
                        if (it->error >= onecputick) {
                                it->expires -= cputime_one_jiffy;
                                it->error -= onecputick;
                        }
                } else {
                        it->expires = 0;
                }

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (it->expires && (!*expires || it->expires < *expires)) {
                *expires = it->expires;
        }
}

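/*
 * Worked example of the error accounting above (illustrative, assuming
 * HZ = 100, i.e. cputime_one_jiffy = 10ms and onecputick = 10^7 ns, and
 * that incr/incr_error are set up by the itimer code as the rounded-up
 * interval and its rounding remainder): a requested 25ms interval is
 * rounded up to incr = 3 jiffies (30ms) with incr_error = 5ms. Each
 * expiry adds 5ms to it->error; every second expiry the error reaches
 * one tick, so one jiffy is taken back off it->expires. Periods then
 * run 30ms, 20ms, 30ms, 20ms..., averaging the requested 25ms despite
 * the jiffy-granular clock.
 */
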
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        unsigned long long utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        /*
         * If cputimer is not running, then there are no active
         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
         */
        if (!READ_ONCE(tsk->signal->cputimer.running))
                return;

        /*
         * Signify that a thread is checking for process timers.
         * Write access to this field is protected by the sighand lock.
         */
        sig->cputimer.checking_timer = true;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime_to_expires(cputime.utime);
        ptime = utime + cputime_to_expires(cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;

        prof_expires = check_timers_list(timers, firing, ptime);
        virt_expires = check_timers_list(++timers, firing, utime);
        sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
                        READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = secs_to_cputime(soft);
                if (!prof_expires || x < prof_expires) {
                        prof_expires = x;
                }
        }

        sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
        sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);

        sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct sighand_struct *sighand;
        unsigned long flags;
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long now;

        WARN_ON_ONCE(p == NULL);

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state))
                        goto out;

                /* Protect timer list r/w in arm_timer() */
                sighand = lock_task_sighand(p, &flags);
                if (!sighand)
                        goto out;
        } else {
                /*
                 * Protect arm_timer() and timer sampling in case of call to
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        timer->it.cpu.expires = 0;
                        goto out;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        unlock_task_sighand(p, &flags);
                        /* If the process is dying, no need to rearm. */
                        goto out;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the sighand locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        WARN_ON_ONCE(!irqs_disabled());
        arm_timer(timer);
        unlock_task_sighand(p, &flags);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;
        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample;

                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
                task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        /*
         * Check if thread group timers expired when the cputimer is
         * running and no other thread in the group is already checking
         * for thread group cputimers. These fields are read without the
         * sighand lock. However, this is fine because this is meant to
         * be a fastpath heuristic to determine whether we should try to
         * acquire the sighand lock to check/handle timers.
         *
         * In the worst case scenario, if 'running' or 'checking_timer' gets
         * set but the current thread doesn't see the change yet, we'll wait
         * until the next thread in the group gets a scheduler interrupt to
         * handle the timer. This isn't an issue in practice because these
         * types of delays with signals actually getting sent are expected.
         */
        if (READ_ONCE(sig->cputimer.running) &&
            !READ_ONCE(sig->cputimer.checking_timer)) {
                struct task_cputime group_sample;

                sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        WARN_ON_ONCE(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);

        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        unsigned long long now;

        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                /*
                 * We are setting itimer.  The *oldval is absolute and we
                 * update it to be relative; the *newval argument is relative
                 * and we update it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
                                *oldval = cputime_one_jiffy;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        return;
                *newval += now;
        }

        /*
         * Update the expiration cache if we are the earliest timer, or if
         * the RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }

        tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}

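/*
 * This helper also backs the classic itimer interface. Userspace sketch
 * that reaches it with clock_idx == CPUCLOCK_PROF (illustrative only):
 *
 *	#include <sys/time.h>
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 1 },
 *		.it_interval = { .tv_sec = 1 },
 *	};
 *	setitimer(ITIMER_PROF, &itv, NULL);
 *
 * SIGPROF is then delivered for every second of process CPU time, with
 * the old (absolute) and new (relative) values converted as described
 * in the comment above.
 */
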
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
                if (!error) {
                        /*
                         * Timer is now unarmed, deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case where the timer was or
                         * is in the middle of firing.  In all other cases the
                         * timer has already been deleted.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

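/*
 * Userspace counterpart of the helper above (illustrative sketch):
 * sleeping on a CPU-time clock blocks until the process has consumed
 * the requested amount of CPU time, not wall-clock time:
 *
 *	#include <time.h>
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * The temporary timer set up above only fires as CPU time accrues to
 * the thread group, so a fully idle process would sleep indefinitely.
 */
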
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block = &current->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
                restart_block->nanosleep.rmtp = rmtp;
                restart_block->nanosleep.expires = timespec_to_ns(rqtp);
        }
        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec t;
        struct itimerspec it;
        int error;

        t = ns_to_timespec(restart_block->nanosleep.expires);

        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->nanosleep.expires = timespec_to_ns(&t);
        }
        return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
        .clock_getres	= posix_cpu_clock_getres,
        .clock_set	= posix_cpu_clock_set,
        .clock_get	= posix_cpu_clock_get,
        .timer_create	= posix_cpu_timer_create,
        .nsleep		= posix_cpu_nsleep,
        .nsleep_restart	= posix_cpu_nsleep_restart,
        .timer_set	= posix_cpu_timer_set,
        .timer_del	= posix_cpu_timer_del,
        .timer_get	= posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres	= process_cpu_clock_getres,
                .clock_get	= process_cpu_clock_get,
                .timer_create	= process_cpu_timer_create,
                .nsleep		= process_cpu_nsleep,
                .nsleep_restart	= process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres	= thread_cpu_clock_getres,
                .clock_get	= thread_cpu_clock_get,
                .timer_create	= thread_cpu_timer_create,
        };
        struct timespec ts;

        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        cputime_to_timespec(cputime_one_jiffy, &ts);
        onecputick = ts.tv_nsec;
        WARN_ON(ts.tv_sec != 0);

        return 0;
}
__initcall(init_posix_cpu_timers);