#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled, so writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with the side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);
static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}
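
/*
 * On 32-bit kernels a 64-bit per-cpu counter cannot be read atomically,
 * so readers pair with the irq_time_write_begin()/end() calls below via
 * this seqcount to detect torn updates.
 */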
#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time, but still wants
	 * to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
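
/*
 * The two helpers below report whether the ns-granularity percpu irq time
 * has advanced past what is already folded into kcpustat, i.e. whether the
 * caller should account the current tick as hardirq/softirq time.
 */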
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
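
/*
 * With CONFIG_IRQ_TIME_ACCOUNTING=n, sched_clock_irqtime is a compile-time
 * constant 0, so the irqtime branches in account_process_tick() and
 * account_idle_ticks() below are optimised away entirely.
 */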
static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}
/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}
/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
/*
 * Account system cpu time to a process and the desired cpustat field.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}
/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}
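
/*
 * Note: tick-based callers such as account_process_tick() pass
 * hardirq_offset == HARDIRQ_OFFSET so that the tick interrupt's own
 * hardirq level is not mistaken for time spent in a nested hardirq.
 */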
/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}
/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
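
/*
 * rq->nr_iowait counts tasks from this runqueue currently blocked in
 * io_schedule(), so idle time is charged as iowait whenever at least
 * one such sleeper exists.
 */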
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;
		cputime_t steal_ct;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		/*
		 * cputime_t may be less precise than nsecs (e.g. if it's
		 * based on jiffies). Let's cast the result to cputime
		 * granularity and account the rest on the next rounds.
		 */
		steal_ct = nsecs_to_cputime(steal);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);

		account_steal_time(steal_ct);
		return steal_ct;
	}
#endif
	return false;
}
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += task_sched_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq, and hence we may never get
 * an opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
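/*
 * For example, a tick that lands while this CPU is serving a softirq on
 * behalf of an ordinary task is charged to CPUTIME_SOFTIRQ via
 * irqtime_account_si_update() below, rather than to the task's stime.
 */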
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 cputime = (__force u64) cputime_one_jiffy;
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	cputime *= ticks;
	scaled *= ticks;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += cputime;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += cputime;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in
		 * cpu_softirq_time, so we have to handle it separately
		 * here. Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}
static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook on irq
		 * entry/exit. This way we know if we need to flush user
		 * time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_cpu_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}
/*
 * Account multiple ticks of steal time.
 * @p: the process from which the cpu time has been stolen
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}
/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks the cpu spent idle
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}
/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
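/*
 * Illustrative example: with stime = 1, utime = 3 and rtime = 8,
 * scale_stime(1, 8, 4) computes 1 * 8 / 4 = 2, i.e. stime keeps its
 * 25% share of the task's (larger) scheduler-accounted runtime.
 */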
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}
/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on whether a task's random
 * scheduling timeslices happen to be interrupted by the timer. Depending
 * on these circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a
 * variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
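/*
 * Worked example with illustrative numbers: suppose prev->stime = 2 and
 * prev->utime = 2 (rtime_i = 4), and rtime grows to 6 while the ticks
 * observed stime = 1, utime = 5. scale_stime(1, 6, 6) yields stime = 1;
 * the "stime < prev->stime" check below lifts it back to 2 and
 * utime = 6 - 2 = 4, so both values stay monotonic and still sum to rtime.
 */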
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

update:
	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}
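
/*
 * Note: local_clock() is only locally monotonic, so a snapshot taken on
 * another CPU may be ahead of it; the clamp to 0 above avoids returning
 * a bogus huge unsigned delta in that case.
 */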
static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}
static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqcount_begin(&tsk->vtime_seqcount);
	delta_cpu = get_vtime_delta(tsk);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_seqcount_end(&tsk->vtime_seqcount);
}
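
/*
 * After the flush above, vtime_snap_whence is VTIME_SYS: any time that
 * accrues until the next user entry will be charged as system time.
 */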
void vtime_user_enter(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock with
	 * the vtime_snap flush and update.
	 * That enforces a right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}
void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqcount_begin(&prev->vtime_seqcount);
	prev->vtime_snap_whence = VTIME_INACTIVE;
	write_seqcount_end(&prev->vtime_seqcount);

	write_seqcount_begin(&current->vtime_seqcount);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_seqcount_end(&current->vtime_seqcount);
}
void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&t->vtime_seqcount);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
	write_seqcount_end(&t->vtime_seqcount);
	local_irq_restore(flags);
}
cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		gtime = t->gtime;
		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return gtime;
}
/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqcount_begin(&t->vtime_seqcount);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_INACTIVE ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}
void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utime)
			*utime = t->utime;
		if (stime)
			*stime = t->stime;
		return;
	}

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}
void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utimescaled)
			*utimescaled = t->utimescaled;
		if (stimescaled)
			*stimescaled = t->stimescaled;
		return;
	}

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);

	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */