/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
/*
 * Note that all tvec_bases are at least 2 byte aligned, so the low bit of
 * the base pointer in timer_list is guaranteed to be zero. Use that LSB
 * as a flag indicating whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG (0x1)
/* Functions below help us manage the 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
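
/*
 * Illustrative sketch (not part of this file): because struct tvec_base is
 * at least 2-byte aligned, bit 0 of a base pointer is free to carry the
 * deferrable flag, and tbase_get_base() must strip it before the pointer
 * is dereferenced. The round-trip, with a hypothetical my_timer:
 *
 *	struct timer_list my_timer;
 *
 *	init_timer_deferrable(&my_timer);
 *	// bit 0 of my_timer.base now carries the flag:
 *	BUG_ON(!tbase_get_deferrable(my_timer.base));
 *	// tbase_get_base() masks it off again:
 *	BUG_ON((unsigned long)tbase_get_base(my_timer.base) &
 *	       TBASE_DEFERRABLE_FLAG);
 */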
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
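
/*
 * Usage sketch (illustrative only, hypothetical names): a periodic
 * housekeeping timer that does not need sub-second accuracy can round its
 * expiry so it fires together with other once-per-interval timers and lets
 * an otherwise idle CPU sleep longer:
 *
 *	static void my_housekeeping_fn(unsigned long data)
 *	{
 *		// ... do the periodic work ...
 *		mod_timer(&my_housekeeping_timer,
 *			  round_jiffies(jiffies + 5 * HZ));
 *	}
 */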
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * late.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
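
/*
 * Worked example of the bucket math above (!CONFIG_BASE_SMALL, so
 * TVR_BITS = 8 and TVN_BITS = 6): a timer due idx = 100 ticks from
 * base->timer_jiffies lands in tv1 at slot (expires & 255); one due
 * idx = 5000 ticks out falls in the range [256, 256*64) and lands in tv2
 * at slot ((expires >> 8) & 63). Each tv2 slot thus covers 256 ticks,
 * each tv3 slot 256*64, and so on, which is why cascade() must
 * redistribute a slot's timers into the finer-grained vectors below it
 * whenever the wheel reaches that slot.
 */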
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}
#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer);

void init_timer_on_stack(struct timer_list *timer)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
static void __init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer(struct timer_list *timer)
{
	debug_timer_init(timer);
	__init_timer(timer);
}
EXPORT_SYMBOL(init_timer);

void init_timer_deferrable(struct timer_list *timer)
{
	init_timer(timer);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);
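
/*
 * Typical usage sketch (illustrative only; my_func, my_ctx and the timer
 * field are hypothetical):
 *
 *	static void my_func(unsigned long data)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)data;
 *		// runs in softirq context when the timer expires
 *	}
 *
 *	init_timer(&ctx->my_timer);
 *	ctx->my_timer.function = my_func;
 *	ctx->my_timer.data = (unsigned long)ctx;
 *	ctx->my_timer.expires = jiffies + HZ;	// one second from now
 *	add_timer(&ctx->my_timer);
 */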
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_timer_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	debug_timer_activate(timer);

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(__mod_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
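
/*
 * Usage sketch (illustrative only, hypothetical names): arm a
 * not-yet-pending timer on a specific CPU, e.g. to keep per-CPU
 * statistics work local. The BUG_ON above means the timer must not
 * already be pending:
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_func;
 *	my_timer.expires = jiffies + HZ;
 *	add_timer_on(&my_timer, cpu);
 */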
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	timer_stats_timer_set_start_info(timer);
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}
EXPORT_SYMBOL(mod_timer);
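
/*
 * Usage sketch (illustrative only): a watchdog pushed forward on every
 * event. mod_timer() both activates an idle timer and re-queues a pending
 * one, so no del_timer()/add_timer() pair is needed; wd_timer and
 * WD_TIMEOUT are hypothetical:
 *
 *	mod_timer(&wd_timer, jiffies + WD_TIMEOUT);
 */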
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif /* CONFIG_SMP */
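
/*
 * Teardown sketch (illustrative only; ctx and my_timer are hypothetical):
 * on module unload, make sure the handler cannot still be running before
 * freeing the structure that owns the timer. Per the rules above, the
 * caller must also guarantee that nothing re-arms the timer afterwards:
 *
 *	del_timer_sync(&ctx->my_timer);
 *	kfree(ctx);
 */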
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_ERR "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif /* CONFIG_NO_HZ */
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy = jiffies_to_cputime(1);

	if (user_tick) {
		account_user_time(p, one_jiffy);
		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
	} else {
		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
		account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
	}
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
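
/*
 * Worked example of the fixed-point average above: CALC_LOAD(load, exp, n)
 * computes load = (load*exp + n*(FIXED_1 - exp)) >> FSHIFT, with
 * FIXED_1 = 1 << FSHIFT and FSHIFT = 11. With four runnable tasks
 * (n = 4*FIXED_1 = 8192), a previous avenrun[0] of 1.00 (2048) and
 * EXP_1 = 1884, one LOAD_FREQ (5 s) step gives
 * (2048*1884 + 8192*(2048 - 1884)) >> 11 = 2540, i.e. roughly 1.24,
 * decaying toward 4.0 over subsequent intervals.
 */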
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return task_tgid_vnr(current);
}
/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
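
/*
 * Usage sketch (illustrative only): the task state must be set *before*
 * calling schedule_timeout(), otherwise the call returns immediately:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	// sleep for up to one second
 *	if (remaining)
 *		;	// woken early, e.g. by a signal
 */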
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return task_pid_vnr(current);
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset(info, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying. The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * not the lock covering the higher precision stuff.
		 */
		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		monotonic_to_bootbased(&tp);
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		info->procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;
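
	/*
	 * Worked example of the normalisation above: with a mem_unit of
	 * 4096 the while loop runs twelve times (bitcount = 12), checking
	 * at each step that mem_total survives another doubling without
	 * wrapping; if it does, the shifts above convert every memory
	 * field from 4 KiB units to bytes and mem_unit becomes 1, which
	 * is what 2.2.x-era userspace expects.
	 */

out:
	return 0;
}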
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
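
/*
 * Usage sketch (illustrative only): an interruptible delay that reports
 * how much of the requested time was left when a signal arrived:
 *
 *	unsigned long left = msleep_interruptible(2000);
 *	if (left)
 *		printk(KERN_INFO "woken %lu ms early by a signal\n", left);
 */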