/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
	if (pv_time_ops.clocksource_resume)
		pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
	.name		= "itc",
	.rating		= 350,
	.read		= itc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
	.resume		= paravirt_clocksource_resume,
#endif
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(next);
	cputime_t delta_stime, delta_utime;
	__u64 now;

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
	if (idle_task(smp_processor_id()) != prev)
		account_system_time(prev, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);

	if (pi->ac_utime) {
		delta_utime = cycle_to_cputime(pi->ac_utime);
		account_user_time(prev, delta_utime, delta_utime);
	}

	pi->ac_stamp = ni->ac_stamp = now;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long flags;
	cputime_t delta_stime;
	__u64 now;

	local_irq_save(flags);

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	if (irq_count() || idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);
	ti->ac_stime = 0;

	ti->ac_stamp = now;

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	struct thread_info *ti = task_thread_info(p);
	cputime_t delta_utime;

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(p, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

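/*
 * Worked example of the rearm loop above (added illustration; the numbers
 * are hypothetical): with itm_delta = 1,000,000 ITC cycles, if the handler
 * finishes at itc = new_itm - 200,000 (inside the delta/2 safety margin),
 * the inner while advances new_itm by one more delta, deliberately dropping
 * that tick rather than programming a deadline the counter may already have
 * passed.
 */
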
/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

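/*
 * Worked example of the stagger formula (added illustration): ia64_fls()
 * yields the bit position of the most significant set bit, so hi is the
 * largest power of two <= cpu, and the shifts come out as odd multiples
 * of successively finer fractions of delta:
 *
 *	cpu 1: hi = 1, shift = delta/2
 *	cpu 2: hi = 2, shift = delta/4
 *	cpu 3: hi = 2, shift = 3*delta/4
 *	cpu 4: hi = 4, shift = delta/8
 *
 * so no two CPUs program their first tick at the same offset.
 */
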
static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);

void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
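	/*
	 * Example of the arithmetic (added; the values are hypothetical): a
	 * 200MHz platform base clock with an ITC ratio of 11/2 gives
	 * itc_freq = 200,000,000 * 11 / 2 = 1,100,000,000 cycles/sec;
	 * with HZ == 1000, the rounded per-tick interval itm_delta is
	 * 1,100,000 cycles.
	 */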
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
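	/*
	 * Note (added): nsec_per_cyc is a fixed-point ratio scaled by
	 * 2^IA64_NSEC_PER_CYC_SHIFT, so cycles convert to nanoseconds
	 * with a multiply and a shift instead of a division.  E.g. at
	 * itc_freq = 1GHz the true ratio is exactly 1 nsec/cycle, and
	 * nsec_per_cyc comes out as 1 << IA64_NSEC_PER_CYC_SHIFT.
	 */
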
	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off! There is no point in
		 * synchronizing ITCs since they may have large differentials
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	paravirt_init_missing_ticks_accounting(smp_processor_id());

	/* avoid softlockup messages when a cpu is unplugged and plugged again. */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
					local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment, you can lose the cmpxchg contention.
	 * If so, cmpxchg returns the new value, which the winner of the
	 * contention wrote. Use that value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}

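/*
 * Illustration of the jitter clamp above (added; the scenario is
 * hypothetical): if CPU A reads itc = 1000 and CPU B, whose ITC lags
 * slightly, then reads itc = 995, B finds itc_lastcycle == 1000, which
 * is after 995, and returns 1000, so the clocksource never appears to
 * run backwards.  When two CPUs race in cmpxchg(), the loser returns
 * the winner's newer value instead of its own stale read.
 */
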
static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};

static struct platform_device rtc_efi_dev = {
	.name = "rtc-efi",
	.id = -1,
};

static int __init rtc_init(void)
{
	if (platform_device_register(&rtc_efi_dev) < 0)
		printk(KERN_ERR "unable to register rtc device...\n");

	/* not necessarily an error */
	return 0;
}
module_init(rtc_init);

void read_persistent_clock(struct timespec *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

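/*
 * Example (added; the values are hypothetical): with itc_freq near 1GHz,
 * cyc_per_usec is about 1000, so udelay(10) spins until roughly 10,000
 * ITC cycles have elapsed.  The deadline is computed from the local ITC,
 * so the delay is only accurate if the thread stays on one CPU or the
 * ITCs are synchronized, per the comment above ia64_itc_udelay().
 */
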
/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timespec *wall, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}
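
/*
 * Reader-side sketch (added illustration; the real consumer is the ia64
 * fsyscall gettimeofday path, not this file): a lockless reader pairs
 * with the seqcount write section above roughly as
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqcount_begin(&fsyscall_gtod_data.seq);
 *		... copy out wall_time / monotonic_time ...
 *	} while (read_seqcount_retry(&fsyscall_gtod_data.seq, seq));
 *
 * retrying whenever an update ran concurrently.
 */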