git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 25 Dec 2016 22:30:04 +0000 (14:30 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 25 Dec 2016 22:30:04 +0000 (14:30 -0800)
Pull timer type cleanups from Thomas Gleixner:
 "This series does a tree wide cleanup of types related to
  timers/timekeeping.

   - Get rid of cycles_t and use a plain u64. The type is not really
     helpful and caused more confusion than clarity

   - Get rid of the ktime union. The union has become useless as we use
     the scalar nanoseconds storage unconditionally now. The 32-bit
     timespec-like storage was removed due to the Y2038 limitations
     some time ago.

     That leaves the odd union access around for no reason. Clean it up.

  Both changes have been done with coccinelle and a small amount of
  manual mopping up"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ktime: Get rid of ktime_equal()
  ktime: Cleanup ktime_set() usage
  ktime: Get rid of the union
  clocksource: Use a plain u64 instead of cycle_t

13 files changed:
1  2 
arch/x86/kernel/apb_timer.c
arch/x86/kernel/hpet.c
arch/x86/kvm/x86.c
drivers/clocksource/arc_timer.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/arm_global_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/jcore-pit.c
drivers/clocksource/metag_generic.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/qcom-timer.c
drivers/clocksource/timer-atlas7.c
virt/kvm/arm/arch_timer.c

index 202a7817beaf4a921c1d1115fdc881652ef42d78,092ea664d2c6b783e176e40b98a4b2515875f6bd..65721dc73bd83b982154c5d5e0f52e06b0ccad29
@@@ -234,7 -234,7 +234,7 @@@ static __init int apbt_late_init(void
        if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
                !apb_timer_block_enabled)
                return 0;
 -      return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
 +      return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
                                 apbt_cpu_dead);
  }
  fs_initcall(apbt_late_init);
@@@ -247,7 -247,7 +247,7 @@@ void apbt_setup_secondary_clock(void) {
  static int apbt_clocksource_register(void)
  {
        u64 start, now;
-       cycle_t t1;
+       u64 t1;
  
        /* Start the counter, use timer 2 as source, timer 0/1 for event */
        dw_apb_clocksource_start(clocksource_apbt);
@@@ -355,7 -355,7 +355,7 @@@ unsigned long apbt_quick_calibrate(void
  {
        int i, scale;
        u64 old, new;
-       cycle_t t1, t2;
+       u64 t1, t2;
        unsigned long khz = 0;
        u32 loop, shift;
  
diff --combined arch/x86/kernel/hpet.c
index 38c8fd684d38b87bb1310a59215da8c9df57a251,367756d55980fd02b94fe72a7c65f628421e089c..85e87b46c318026ed28d87056c516aec3e5fb9ed
@@@ -791,7 -791,7 +791,7 @@@ static union hpet_lock hpet __cacheline
        { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
  };
  
- static cycle_t read_hpet(struct clocksource *cs)
+ static u64 read_hpet(struct clocksource *cs)
  {
        unsigned long flags;
        union hpet_lock old, new;
         * Read HPET directly if in NMI.
         */
        if (in_nmi())
-               return (cycle_t)hpet_readl(HPET_COUNTER);
+               return (u64)hpet_readl(HPET_COUNTER);
  
        /*
         * Read the current state of the lock and HPET value atomically.
                WRITE_ONCE(hpet.value, new.value);
                arch_spin_unlock(&hpet.lock);
                local_irq_restore(flags);
-               return (cycle_t)new.value;
+               return (u64)new.value;
        }
        local_irq_restore(flags);
  
@@@ -843,15 -843,15 +843,15 @@@ contended
                new.lockval = READ_ONCE(hpet.lockval);
        } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
  
-       return (cycle_t)new.value;
+       return (u64)new.value;
  }
  #else
  /*
   * For UP or 32-bit.
   */
- static cycle_t read_hpet(struct clocksource *cs)
+ static u64 read_hpet(struct clocksource *cs)
  {
-       return (cycle_t)hpet_readl(HPET_COUNTER);
+       return (u64)hpet_readl(HPET_COUNTER);
  }
  #endif
  
@@@ -867,7 -867,7 +867,7 @@@ static struct clocksource clocksource_h
  static int hpet_clocksource_register(void)
  {
        u64 start, now;
-       cycle_t t1;
+       u64 t1;
  
        /* Start the counter */
        hpet_restart_counter();
@@@ -1051,11 -1051,11 +1051,11 @@@ static __init int hpet_late_init(void
                return 0;
  
        /* This notifier should be called after workqueue is ready */
 -      ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
 +      ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
                                hpet_cpuhp_online, NULL);
        if (ret)
                return ret;
 -      ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
 +      ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
                                hpet_cpuhp_dead);
        if (ret)
                goto err_cpuhp;
diff --combined arch/x86/kvm/x86.c
index 6414fa6cb9fd561853fff141b333498c951c968b,ed04398f52c1bf30ed9a1ec0d62c5a4ee91adbd4..51ccfe08e32ff0570517fda01277dc0085721e94
@@@ -1131,8 -1131,8 +1131,8 @@@ struct pvclock_gtod_data 
  
        struct { /* extract of a clocksource struct */
                int vclock_mode;
-               cycle_t cycle_last;
-               cycle_t mask;
+               u64     cycle_last;
+               u64     mask;
                u32     mult;
                u32     shift;
        } clock;
@@@ -1572,9 -1572,9 +1572,9 @@@ static inline void adjust_tsc_offset_ho
  
  #ifdef CONFIG_X86_64
  
- static cycle_t read_tsc(void)
+ static u64 read_tsc(void)
  {
-       cycle_t ret = (cycle_t)rdtsc_ordered();
+       u64 ret = (u64)rdtsc_ordered();
        u64 last = pvclock_gtod_data.clock.cycle_last;
  
        if (likely(ret >= last))
        return last;
  }
  
- static inline u64 vgettsc(cycle_t *cycle_now)
+ static inline u64 vgettsc(u64 *cycle_now)
  {
        long v;
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
        return v * gtod->clock.mult;
  }
  
- static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
+ static int do_monotonic_boot(s64 *t, u64 *cycle_now)
  {
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
        unsigned long seq;
  }
  
  /* returns true if host is using tsc clocksource */
- static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+ static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
  {
        /* checked again under seqlock below */
        if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
@@@ -5855,7 -5855,7 +5855,7 @@@ static void kvm_timer_init(void
        }
        pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
  
 -      cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
 +      cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
                          kvmclock_cpu_online, kvmclock_cpu_down_prep);
  }
  
index 2b7e87134d1a5631286aa921325e28bd57828d8f,3ea46343024f8fea4b64728c6985210bd8331e20..7517f959cba762e484cdc1fb00a78cb95aee5fe0
@@@ -56,7 -56,7 +56,7 @@@ static int noinline arc_get_timer_clk(s
  
  #ifdef CONFIG_ARC_TIMERS_64BIT
  
- static cycle_t arc_read_gfrc(struct clocksource *cs)
+ static u64 arc_read_gfrc(struct clocksource *cs)
  {
        unsigned long flags;
        u32 l, h;
@@@ -71,7 -71,7 +71,7 @@@
  
        local_irq_restore(flags);
  
-       return (((cycle_t)h) << 32) | l;
+       return (((u64)h) << 32) | l;
  }
  
  static struct clocksource arc_counter_gfrc = {
@@@ -105,7 -105,7 +105,7 @@@ CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,
  #define AUX_RTC_LOW   0x104
  #define AUX_RTC_HIGH  0x105
  
- static cycle_t arc_read_rtc(struct clocksource *cs)
+ static u64 arc_read_rtc(struct clocksource *cs)
  {
        unsigned long status;
        u32 l, h;
                status = read_aux_reg(AUX_RTC_CTRL);
        } while (!(status & _BITUL(31)));
  
-       return (((cycle_t)h) << 32) | l;
+       return (((u64)h) << 32) | l;
  }
  
  static struct clocksource arc_counter_rtc = {
@@@ -166,9 -166,9 +166,9 @@@ CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,a
   * 32bit TIMER1 to keep counting monotonically and wraparound
   */
  
- static cycle_t arc_read_timer1(struct clocksource *cs)
+ static u64 arc_read_timer1(struct clocksource *cs)
  {
-       return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+       return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
  }
  
  static struct clocksource arc_counter_timer1 = {
@@@ -309,7 -309,7 +309,7 @@@ static int __init arc_clockevent_setup(
        }
  
        ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
 -                              "AP_ARC_TIMER_STARTING",
 +                              "clockevents/arc/timer:starting",
                                arc_timer_starting_cpu,
                                arc_timer_dying_cpu);
        if (ret) {
index cdeca850f29e20d4c6f48be24d421557f20bc686,394e417414d3f297c9f56497f1a00b31afe55d21..4c8c3fb2e8b248b3335d1c4f582f7faab1121d37
@@@ -562,12 -562,12 +562,12 @@@ static u64 arch_counter_get_cntvct_mem(
   */
  u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
  
- static cycle_t arch_counter_read(struct clocksource *cs)
+ static u64 arch_counter_read(struct clocksource *cs)
  {
        return arch_timer_read_counter();
  }
  
- static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+ static u64 arch_counter_read_cc(const struct cyclecounter *cc)
  {
        return arch_timer_read_counter();
  }
@@@ -738,7 -738,7 +738,7 @@@ static int __init arch_timer_register(v
  
        /* Register and immediately configure the timer on the boot CPU */
        err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
 -                              "AP_ARM_ARCH_TIMER_STARTING",
 +                              "clockevents/arm/arch_timer:starting",
                                arch_timer_starting_cpu, arch_timer_dying_cpu);
        if (err)
                goto out_unreg_cpupm;
index bbfeb2800a94a582b52d5ffe57381cbc5dba72fa,570cc58baec441f018ee8ffe704c4b53ce12a008..123ed20ac2ffd406a4e9225ae0a23909066a2f1b
@@@ -195,7 -195,7 +195,7 @@@ static int gt_dying_cpu(unsigned int cp
        return 0;
  }
  
- static cycle_t gt_clocksource_read(struct clocksource *cs)
+ static u64 gt_clocksource_read(struct clocksource *cs)
  {
        return gt_counter_read();
  }
@@@ -316,7 -316,7 +316,7 @@@ static int __init global_timer_of_regis
                goto out_irq;
        
        err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
 -                              "AP_ARM_GLOBAL_TIMER_STARTING",
 +                              "clockevents/arm/global_timer:starting",
                                gt_starting_cpu, gt_dying_cpu);
        if (err)
                goto out_irq;
index b45b72b9586107e3c99d7ead9b964a47d35d1b23,c8b9f834f4de1ea00443fc5de7de72dc6bb974fe..4da1dc2278bd7fc34caa9e00d29f71ec1ebd015f
@@@ -183,7 -183,7 +183,7 @@@ static u64 exynos4_read_count_64(void
                hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
        } while (hi != hi2);
  
-       return ((cycle_t)hi << 32) | lo;
+       return ((u64)hi << 32) | lo;
  }
  
  /**
@@@ -199,7 -199,7 +199,7 @@@ static u32 notrace exynos4_read_count_3
        return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
  }
  
- static cycle_t exynos4_frc_read(struct clocksource *cs)
+ static u64 exynos4_frc_read(struct clocksource *cs)
  {
        return exynos4_read_count_32();
  }
@@@ -266,7 -266,7 +266,7 @@@ static void exynos4_mct_comp0_stop(void
  static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
  {
        unsigned int tcon;
-       cycle_t comp_cycle;
+       u64 comp_cycle;
  
        tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
  
@@@ -552,7 -552,7 +552,7 @@@ static int __init exynos4_timer_resourc
  
        /* Install hotplug callbacks which configure the timer on this CPU */
        err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
 -                              "AP_EXYNOS4_MCT_TIMER_STARTING",
 +                              "clockevents/exynos4/mct_timer:starting",
                                exynos4_mct_starting_cpu,
                                exynos4_mct_dying_cpu);
        if (err)
index 4e4146f69845777bb7447cecd7e5c9e57c21da2c,e90a6cfcb061f388618bbb09f724ae32163679f7..7c61226f435918ca3a66d64a4c9f195ae8129994
@@@ -57,7 -57,7 +57,7 @@@ static notrace u64 jcore_sched_clock_re
        return seclo * NSEC_PER_SEC + nsec;
  }
  
- static cycle_t jcore_clocksource_read(struct clocksource *cs)
+ static u64 jcore_clocksource_read(struct clocksource *cs)
  {
        return jcore_sched_clock_read();
  }
@@@ -240,7 -240,7 +240,7 @@@ static int __init jcore_pit_init(struc
        }
  
        cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
 -                        "AP_JCORE_TIMER_STARTING",
 +                        "clockevents/jcore:starting",
                          jcore_pit_local_init, NULL);
  
        return 0;
index 172f43d4bc1af073af4329723b1fe290b963e9b3,8d06a0f7ff268dc236a5d22500ee2c851bb9ab8a..6fcf96540631d2d9a53a1f64978e99d4cc2f26ab
@@@ -56,7 -56,7 +56,7 @@@ static int metag_timer_set_next_event(u
        return 0;
  }
  
- static cycle_t metag_clocksource_read(struct clocksource *cs)
+ static u64 metag_clocksource_read(struct clocksource *cs)
  {
        return __core_reg_get(TXTIMER);
  }
@@@ -154,6 -154,6 +154,6 @@@ int __init metag_generic_timer_init(voi
  
        /* Hook cpu boot to configure the CPU's timers */
        return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
 -                               "AP_METAG_TIMER_STARTING",
 +                               "clockevents/metag:starting",
                                 arch_timer_starting_cpu, NULL);
  }
index d9278847ffb25070c4aaca5387319ccd2f0ea9e5,7b86d07c99b4ce0327e0f517be7b328a248880a3..d9ef7a61e093964485e2f078c733a299b33cc409
@@@ -120,12 -120,12 +120,12 @@@ static int gic_clockevent_init(void
        }
  
        cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
 -                        "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
 -                        gic_dying_cpu);
 +                        "clockevents/mips/gic/timer:starting",
 +                        gic_starting_cpu, gic_dying_cpu);
        return 0;
  }
  
- static cycle_t gic_hpt_read(struct clocksource *cs)
+ static u64 gic_hpt_read(struct clocksource *cs)
  {
        return gic_read_count();
  }
index 3bf65fff5c08bef6e8f41a1ee5577b4e0bbd6493,d5d048d890d4e662389a82d7db2ac254962daf17..ee358cdf4a07b37e1952728fc22e21e2834d4864
@@@ -89,7 -89,7 +89,7 @@@ static struct clock_event_device __perc
  
  static void __iomem *source_base;
  
- static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+ static notrace u64 msm_read_timer_count(struct clocksource *cs)
  {
        return readl_relaxed(source_base + TIMER_COUNT_VAL);
  }
@@@ -182,7 -182,7 +182,7 @@@ static int __init msm_timer_init(u32 dg
        } else {
                /* Install and invoke hotplug callbacks */
                res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
 -                                      "AP_QCOM_TIMER_STARTING",
 +                                      "clockevents/qcom/timer:starting",
                                        msm_local_timer_starting_cpu,
                                        msm_local_timer_dying_cpu);
                if (res) {
index 3c23e1744f4a7b19db773885b5d9a6aab3b41c1f,db0f21e7d7d25ee02a15b0acb74786c021637149..3d8a181f02528346c5d99053cdd87ad6b6569e34
@@@ -85,7 -85,7 +85,7 @@@ static irqreturn_t sirfsoc_timer_interr
  }
  
  /* read 64-bit timer counter */
- static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+ static u64 sirfsoc_timer_read(struct clocksource *cs)
  {
        u64 cycles;
  
@@@ -221,7 -221,7 +221,7 @@@ static int __init sirfsoc_clockevent_in
  
        /* Install and invoke hotplug callbacks */
        return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
 -                               "AP_MARCO_TIMER_STARTING",
 +                               "clockevents/marco:starting",
                                 sirfsoc_local_timer_starting_cpu,
                                 sirfsoc_local_timer_dying_cpu);
  }
index 5b4f60d433142bdd9ae9ceb31db38e9800bf1c0d,97b657adb3bdd870543fd61c4429f00039ac8f51..a2dbbccbb6a3fe96751fafde8cd01658aa1301a5
@@@ -39,7 -39,7 +39,7 @@@ void kvm_timer_vcpu_put(struct kvm_vcp
        vcpu->arch.timer_cpu.active_cleared_last = false;
  }
  
- static cycle_t kvm_phys_timer_read(void)
+ static u64 kvm_phys_timer_read(void)
  {
        return timecounter->cc->read(timecounter->cc);
  }
@@@ -102,7 -102,7 +102,7 @@@ static void kvm_timer_inject_irq_work(s
  
  static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
  {
-       cycle_t cval, now;
+       u64 cval, now;
  
        cval = vcpu->arch.timer_cpu.cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
@@@ -155,7 -155,7 +155,7 @@@ static bool kvm_timer_irq_can_fire(stru
  bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
  {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       cycle_t cval, now;
+       u64 cval, now;
  
        if (!kvm_timer_irq_can_fire(vcpu))
                return false;
@@@ -456,7 -456,7 +456,7 @@@ int kvm_timer_hyp_init(void
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
  
        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
 -                        "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
 +                        "kvm/arm/timer:starting", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return err;
  }