#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include <linux/cputime.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled, so writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get the old
 * or the new value, with the side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
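
/*
 * irq time accounting through sched_clock is off by default; architectures
 * with a suitable sched_clock turn it on via enable_sched_clock_irqtime().
 */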
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	u64_stats_update_begin(&irqtime->sync);
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count()) {
		cpustat[CPUTIME_IRQ] += delta;
		irqtime->tick_delta += delta;
	} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
		cpustat[CPUTIME_SOFTIRQ] += delta;
		irqtime->tick_delta += delta;
	}

	u64_stats_update_end(&irqtime->sync);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
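
/*
 * Hand the irq/softirq time accumulated since the last tick over to the
 * tick accounting path, clamped to @maxtime. Whatever is consumed here is
 * subtracted from tick_delta so it only gets accounted once.
 */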
static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += cputime;
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		cpustat[CPUTIME_USER] += cputime;
		cpustat[CPUTIME_GUEST] += cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @index: the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
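/*
 * When the paravirt steal clock is available, the delta since the previous
 * snapshot is accounted as steal time, clamped to @maxtime; otherwise
 * nothing is accounted and 0 is returned.
 */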
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	/* Shall be converted to a lockdep-enabled lightweight check */
	WARN_ON_ONCE(!irqs_disabled());

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}
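
/*
 * sum_exec_runtime is a u64: on 64-bit kernels it can be read directly,
 * while on 32-bit a plain load could tear against a concurrent update, so
 * the out-of-line variant takes the task's rq lock to read a consistent
 * value.
 */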
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip accounting
	 * those pending times and rely only on values updated on tick or
	 * other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

	vtime_flush(prev);
	arch_vtime_task_switch(prev);
}
#endif

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;
	struct rq *rq = this_rq();

	if (vtime_accounting_cpu_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
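/*
 * For example, cputime_adjust() below calls this with total = stime + utime;
 * with stime = 2, utime = 6 and rtime = 10 (in ticks for readability), the
 * scaled stime becomes 2 * 10 / 8 = 2 after integer division and the caller
 * derives utime = rtime - stime = 8.
 */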
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return scaled;
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
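/*
 * For example, if prev = {stime = 4, utime = 4} and the new sample is
 * rtime = 10, stime = 3, utime = 7: scale_stime() gives stime = 3, which is
 * below prev->stime, so it is clamped to 4 and utime becomes
 * rtime - stime = 6. Neither value went backwards and they still sum to
 * rtime.
 */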
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or both stime and utime are 0, assume all runtime is
	 * userspace. Once a task gets some ticks, the monotonicity code at
	 * 'update' will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = scale_stime(stime, rtime, stime + utime);

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
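/*
 * Nanoseconds elapsed since the last vtime snapshot of @tsk, at jiffies
 * granularity; returns 0 if jiffies has not advanced past the snapshot yet.
 */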
static u64 vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_before(now, (unsigned long)tsk->vtime_snap))
		return 0;

	return jiffies_to_nsecs(now - tsk->vtime_snap);
}

static u64 get_vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);
	u64 delta, other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, and no need for steal time accounting to make up for
	 * lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	delta = jiffies_to_nsecs(now - tsk->vtime_snap);
	other = account_other_time(delta);
	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
	tsk->vtime_snap = now;

	return delta - other;
}

static void __vtime_account_system(struct task_struct *tsk)
{
	account_system_time(tsk, irq_count(), get_vtime_delta(tsk));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_delta(tsk))
		return;

	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seqcount);
}
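
/*
 * Flush the pending delta as user time and switch the snapshot context
 * back to VTIME_SYS.
 */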
void vtime_account_user(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	tsk->vtime_snap_whence = VTIME_SYS;
	if (vtime_delta(tsk))
		account_user_time(tsk, get_vtime_delta(tsk));
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(tsk));
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqcount_begin(&prev->vtime_seqcount);
	prev->vtime_snap_whence = VTIME_INACTIVE;
	write_seqcount_end(&prev->vtime_seqcount);

	write_seqcount_begin(&current->vtime_seqcount);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = jiffies;
	write_seqcount_end(&current->vtime_seqcount);
}
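
/*
 * Initialize the vtime state of @cpu's idle task so that accounting starts
 * from the current jiffies value.
 */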
void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&t->vtime_seqcount);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = jiffies;
	write_seqcount_end(&t->vtime_seqcount);
	local_irq_restore(flags);
}
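
/*
 * Return the guest time of @t, adding the pending tickless delta when the
 * task is currently running in VM context (PF_VCPU while in VTIME_SYS).
 */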
u64 task_gtime(struct task_struct *t)
{
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		gtime = t->gtime;
		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * accounting update.
 */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	u64 delta;
	unsigned int seq;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return;
	}

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU)
			*utime += delta;
		else if (t->vtime_snap_whence == VTIME_SYS)
			*stime += delta;
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */