From 9cfa1a0279e22063a727fd204a75cf3672860d83 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Thu, 25 Jun 2015 18:44:00 +0200
Subject: [PATCH] x86/asm/tsc: Use the full 64-bit TSC in delay_tsc()

As a very minor optimization, delay_tsc() was only using the low
32 bits of the TSC. It's a delay function, so just use the whole
thing.

Signed-off-by: Andy Lutomirski
Signed-off-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Huang Rui
Cc: John Stultz
Cc: Len Brown
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Ralf Baechle
Cc: Thomas Gleixner
Cc: kvm ML
Link: http://lkml.kernel.org/r/bd1a277c71321b67c4794970cb5ace05efe21ab6.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/lib/delay.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 9a52ad0c0758..35115f3786a9 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -49,16 +49,16 @@ static void delay_loop(unsigned long loops)
 /* TSC based delay: */
 static void delay_tsc(unsigned long __loops)
 {
-	u32 bclock, now, loops = __loops;
+	u64 bclock, now, loops = __loops;
 	int cpu;
 
 	preempt_disable();
 	cpu = smp_processor_id();
 	rdtsc_barrier();
-	rdtscl(bclock);
+	bclock = native_read_tsc();
 	for (;;) {
 		rdtsc_barrier();
-		rdtscl(now);
+		now = native_read_tsc();
 		if ((now - bclock) >= loops)
 			break;
 
@@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
 			loops -= (now - bclock);
 			cpu = smp_processor_id();
 			rdtsc_barrier();
-			rdtscl(bclock);
+			bclock = native_read_tsc();
 		}
 	}
 	preempt_enable();
-- 
2.39.2
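
For readers outside the kernel tree, here is a minimal userspace sketch of
the technique the patch applies: busy-waiting on the full 64-bit TSC so the
(now - bclock) delta and the requested cycle count are never truncated to
32 bits. Everything in it is an assumption of the sketch, not part of the
patch: delay_tsc_sketch() is a made-up name, and the GCC/Clang intrinsics
__rdtsc() and _mm_lfence() from <x86intrin.h> stand in for the kernel's
native_read_tsc() and rdtsc_barrier(). The sketch also omits the preemption
and CPU-migration handling that the kernel version needs.

/*
 * Userspace sketch of the patched delay_tsc() logic: spin until the
 * full 64-bit TSC has advanced by at least `cycles`.
 */
#include <stdint.h>
#include <x86intrin.h>

static void delay_tsc_sketch(uint64_t cycles)
{
	uint64_t start, now;

	_mm_lfence();		/* keep the RDTSC from executing early */
	start = __rdtsc();
	for (;;) {
		_mm_lfence();
		now = __rdtsc();
		/* Unsigned 64-bit subtraction: no u32 truncation of
		 * either the delta or the requested cycle count. */
		if ((now - start) >= cycles)
			break;
	}
}

A real caller would first convert a desired delay (e.g. in nanoseconds) to
cycles using the measured TSC frequency; that calibration is deliberately
left out here. The width matters because with u32 variables a delay of
2^32 cycles or more (roughly 1.4 seconds at 3 GHz) would have its cycle
count truncated, whereas the u64 version is correct for any value the
caller can pass.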