if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
- return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+ return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
apbt_cpu_dead);
}
fs_initcall(apbt_late_init);
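The string arguments introduced above follow the cpuhp core's "subsystem/instance:state" naming scheme; the name is what shows up in sysfs and tracepoints, while the enum constant still fixes the state's position in the hotplug ordering. A minimal sketch of the registration pattern, assuming a hypothetical my_dead_cpu() teardown callback:

/* Sketch only: the callback is hypothetical, the API shape is real.
 * A NULL startup callback means nothing runs when a CPU comes online. */
static int my_dead_cpu(unsigned int cpu)
{
	/* @cpu is already gone here; release its timer resources */
	return 0;
}

static int __init my_late_init(void)
{
	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead",
				 NULL, my_dead_cpu);
}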
static int apbt_clocksource_register(void)
{
u64 start, now;
- cycle_t t1;
+ u64 t1;
/* Start the counter, use timer 2 as source, timer 0/1 for event */
dw_apb_clocksource_start(clocksource_apbt);
{
int i, scale;
u64 old, new;
- cycle_t t1, t2;
+ u64 t1, t2;
unsigned long khz = 0;
u32 loop, shift;
{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
};
- static cycle_t read_hpet(struct clocksource *cs)
+ static u64 read_hpet(struct clocksource *cs)
{
unsigned long flags;
union hpet_lock old, new;
* Read HPET directly if in NMI.
*/
if (in_nmi())
- return (cycle_t)hpet_readl(HPET_COUNTER);
+ return (u64)hpet_readl(HPET_COUNTER);
/*
* Read the current state of the lock and HPET value atomically.
WRITE_ONCE(hpet.value, new.value);
arch_spin_unlock(&hpet.lock);
local_irq_restore(flags);
- return (cycle_t)new.value;
+ return (u64)new.value;
}
local_irq_restore(flags);
new.lockval = READ_ONCE(hpet.lockval);
} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
- return (cycle_t)new.value;
+ return (u64)new.value;
}
#else
/*
* For UP or 32-bit.
*/
- static cycle_t read_hpet(struct clocksource *cs)
+ static u64 read_hpet(struct clocksource *cs)
{
- return (cycle_t)hpet_readl(HPET_COUNTER);
+ return (u64)hpet_readl(HPET_COUNTER);
}
#endif
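On SMP, read_hpet() above keeps many CPUs from hammering the HPET at once: the reader that wins the spinlock does the MMIO read and publishes the result, and contending readers spin until the value changes or the lock drops, then reuse it. That only works because the lock and the published value share one 64-bit word. A sketch of that packing, inferred from the __ARCH_SPIN_LOCK_UNLOCKED initializer above (treat the exact layout as an assumption):

union hpet_lock {
	struct {
		arch_spinlock_t lock;	/* serializes the MMIO read */
		u32 value;		/* last HPET value published */
	};
	u64 lockval;	/* lock + value, snapshotted in one 64-bit load */
};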
static int hpet_clocksource_register(void)
{
u64 start, now;
- cycle_t t1;
+ u64 t1;
/* Start the counter */
hpet_restart_counter();
return 0;
/* This notifier should be called after workqueue is ready */
- ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+ ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
hpet_cpuhp_online, NULL);
if (ret)
return ret;
- ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+ ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
hpet_cpuhp_dead);
if (ret)
goto err_cpuhp;
struct { /* extract of a clocksource struct */
int vclock_mode;
- cycle_t cycle_last;
- cycle_t mask;
+ u64 cycle_last;
+ u64 mask;
u32 mult;
u32 shift;
} clock;
#ifdef CONFIG_X86_64
- static cycle_t read_tsc(void)
+ static u64 read_tsc(void)
{
- cycle_t ret = (cycle_t)rdtsc_ordered();
+ u64 ret = (u64)rdtsc_ordered();
u64 last = pvclock_gtod_data.clock.cycle_last;
if (likely(ret >= last))
return ret;
/* TSC raced behind cycle_last; clamp so the clock never goes backwards */
return last;
}
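The likely() branch above is the monotonicity clamp on the kvmclock fast path: rdtsc_ordered() on one CPU can still observe a value slightly behind the cycle_last another CPU published, and reporting it would make the guest clock jump backwards. The pattern in isolation, with illustrative names:

/* Illustrative sketch of the clamp above, not the kernel's code. */
static u64 monotonic_tsc(u64 raw, u64 last_published)
{
	/* never report a timestamp behind what was already handed out */
	return raw >= last_published ? raw : last_published;
}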
- static inline u64 vgettsc(cycle_t *cycle_now)
+ static inline u64 vgettsc(u64 *cycle_now)
{
long v;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
return v * gtod->clock.mult;
}
- static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
+ static int do_monotonic_boot(s64 *t, u64 *cycle_now)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
unsigned long seq;
}
/* returns true if host is using tsc clocksource */
- static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+ static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
{
/* checked again under seqlock below */
if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
}
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
- cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+ cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
#ifdef CONFIG_ARC_TIMERS_64BIT
- static cycle_t arc_read_gfrc(struct clocksource *cs)
+ static u64 arc_read_gfrc(struct clocksource *cs)
{
unsigned long flags;
u32 l, h;
local_irq_restore(flags);
- return (((cycle_t)h) << 32) | l;
+ return (((u64)h) << 32) | l;
}
static struct clocksource arc_counter_gfrc = {
#define AUX_RTC_LOW 0x104
#define AUX_RTC_HIGH 0x105
- static cycle_t arc_read_rtc(struct clocksource *cs)
+ static u64 arc_read_rtc(struct clocksource *cs)
{
unsigned long status;
u32 l, h;
status = read_aux_reg(AUX_RTC_CTRL);
} while (!(status & _BITUL(31)));
- return (((cycle_t)h) << 32) | l;
+ return (((u64)h) << 32) | l;
}
static struct clocksource arc_counter_rtc = {
* 32bit TIMER1 to keep counting monotonically and wraparound
*/
- static cycle_t arc_read_timer1(struct clocksource *cs)
+ static u64 arc_read_timer1(struct clocksource *cs)
{
- return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+ return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}
static struct clocksource arc_counter_timer1 = {
}
ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
- "AP_ARC_TIMER_STARTING",
+ "clockevents/arc/timer:starting",
arc_timer_starting_cpu,
arc_timer_dying_cpu);
if (ret) {
*/
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
- static cycle_t arch_counter_read(struct clocksource *cs)
+ static u64 arch_counter_read(struct clocksource *cs)
{
return arch_timer_read_counter();
}
- static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+ static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
return arch_timer_read_counter();
}
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
- "AP_ARM_ARCH_TIMER_STARTING",
+ "clockevents/arm/arch_timer:starting",
arch_timer_starting_cpu, arch_timer_dying_cpu);
if (err)
goto out_unreg_cpupm;
return 0;
}
- static cycle_t gt_clocksource_read(struct clocksource *cs)
+ static u64 gt_clocksource_read(struct clocksource *cs)
{
return gt_counter_read();
}
goto out_irq;
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
- "AP_ARM_GLOBAL_TIMER_STARTING",
+ "clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
if (err)
goto out_irq;
hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
} while (hi != hi2);
- return ((cycle_t)hi << 32) | lo;
+ return ((u64)hi << 32) | lo;
}
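The loop above is the standard way to read a 64-bit counter through two 32-bit registers: re-read the high word until it is stable, so the low word is guaranteed to belong to the same 64-bit value and a carry between the two reads cannot be missed. Generic form of the pattern (register pointers are placeholders):

/* Generic sketch of the split-register read above; lo/hi are placeholders. */
static u64 read_split_counter(void __iomem *lo, void __iomem *hi)
{
	u32 h, h2, l;

	do {
		h  = readl_relaxed(hi);
		l  = readl_relaxed(lo);
		h2 = readl_relaxed(hi);
	} while (h != h2);	/* retry if the high word rolled over */

	return ((u64)h << 32) | l;
}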
/**
return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}
- static cycle_t exynos4_frc_read(struct clocksource *cs)
+ static u64 exynos4_frc_read(struct clocksource *cs)
{
return exynos4_read_count_32();
}
static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
unsigned int tcon;
- cycle_t comp_cycle;
+ u64 comp_cycle;
tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
- "AP_EXYNOS4_MCT_TIMER_STARTING",
+ "clockevents/exynos4/mct_timer:starting",
exynos4_mct_starting_cpu,
exynos4_mct_dying_cpu);
if (err)
return seclo * NSEC_PER_SEC + nsec;
}
- static cycle_t jcore_clocksource_read(struct clocksource *cs)
+ static u64 jcore_clocksource_read(struct clocksource *cs)
{
return jcore_sched_clock_read();
}
}
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
- "AP_JCORE_TIMER_STARTING",
+ "clockevents/jcore:starting",
jcore_pit_local_init, NULL);
return 0;
return 0;
}
- static cycle_t metag_clocksource_read(struct clocksource *cs)
+ static u64 metag_clocksource_read(struct clocksource *cs)
{
return __core_reg_get(TXTIMER);
}
/* Hook cpu boot to configure the CPU's timers */
return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
- "AP_METAG_TIMER_STARTING",
+ "clockevents/metag:starting",
arch_timer_starting_cpu, NULL);
}
}
cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
- "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
- gic_dying_cpu);
+ "clockevents/mips/gic/timer:starting",
+ gic_starting_cpu, gic_dying_cpu);
return 0;
}
- static cycle_t gic_hpt_read(struct clocksource *cs)
+ static u64 gic_hpt_read(struct clocksource *cs)
{
return gic_read_count();
}
static void __iomem *source_base;
- static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+ static notrace u64 msm_read_timer_count(struct clocksource *cs)
{
return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
} else {
/* Install and invoke hotplug callbacks */
res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
- "AP_QCOM_TIMER_STARTING",
+ "clockevents/qcom/timer:starting",
msm_local_timer_starting_cpu,
msm_local_timer_dying_cpu);
if (res) {
}
/* read 64-bit timer counter */
- static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+ static u64 sirfsoc_timer_read(struct clocksource *cs)
{
u64 cycles;
/* Install and invoke hotplug callbacks */
return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
- "AP_MARCO_TIMER_STARTING",
+ "clockevents/marco:starting",
sirfsoc_local_timer_starting_cpu,
sirfsoc_local_timer_dying_cpu);
}
vcpu->arch.timer_cpu.active_cleared_last = false;
}
- static cycle_t kvm_phys_timer_read(void)
+ static u64 kvm_phys_timer_read(void)
{
return timecounter->cc->read(timecounter->cc);
}
static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
{
- cycle_t cval, now;
+ u64 cval, now;
cval = vcpu->arch.timer_cpu.cntv_cval;
now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- cycle_t cval, now;
+ u64 cval, now;
if (!kvm_timer_irq_can_fire(vcpu))
return false;
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
- "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
+ "kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
return err;
}
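All of the *_STARTING states renamed in this series share one property: their callbacks run on the CPU that is coming up or going down, with interrupts disabled, which is exactly what per-CPU timers need in order to program their local hardware. A sketch of that pairing, with hypothetical callbacks:

/* Sketch: hypothetical callbacks for a per-CPU timer; both run on the
 * affected CPU itself, IRQs off, because the state is in the STARTING
 * section of the hotplug state machine. */
static int my_timer_starting_cpu(unsigned int cpu)
{
	/* enable and program this CPU's local timer */
	return 0;
}

static int my_timer_dying_cpu(unsigned int cpu)
{
	/* quiesce the local timer before the CPU is torn down */
	return 0;
}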