sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
author    Rik van Riel <riel@redhat.com>      Tue, 30 Sep 2014 19:59:47 +0000 (15:59 -0400)
committer Ingo Molnar <mingo@kernel.org>      Fri, 3 Oct 2014 03:46:55 +0000 (05:46 +0200)
On 32 bit systems cmpxchg cannot handle 64 bit values, so
some additional magic is required to allow a 32 bit system
with CONFIG_VIRT_CPU_ACCOUNTING_GEN=y to build.

Make sure the correct cmpxchg function is used when doing
an atomic swap of a cputime_t.
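
A minimal sketch of the pattern the patch introduces (editorial
illustration, not code from the patch; CONFIG_CPUTIME_IS_64BIT is an
assumed stand-in symbol, not a real Kconfig option): the
compare-and-swap primitive has to match the width of cputime_t, and on
a 32 bit system a 64 bit cputime_t can only be swapped with cmpxchg64().

	/*
	 * Editorial sketch, not kernel code: dispatch the swap to the
	 * cmpxchg variant that matches sizeof(cputime_t). Each
	 * cputime.h touched below makes this choice for its own case.
	 */
	#ifdef CONFIG_CPUTIME_IS_64BIT	/* assumed stand-in symbol */
	#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
	#else				/* word-sized cputime_t */
	#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
	#endif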

Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: umgwanakikbuti@gmail.com
Cc: fweisbec@gmail.com
Cc: srao@redhat.com
Cc: lwoodman@redhat.com
Cc: atheurer@redhat.com
Cc: oleg@redhat.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: linux390@de.ibm.com
Cc: linux-arch@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-s390@vger.kernel.org
Link: http://lkml.kernel.org/r/20140930155947.070cdb1f@annuminas.surriel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/powerpc/include/asm/cputime.h
arch/s390/include/asm/cputime.h
include/asm-generic/cputime_jiffies.h
include/asm-generic/cputime_nsecs.h
kernel/sched/cputime.c

diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 607559ab271ff98b45de1f11964416bebaf32841..6c840ceab8201cc191f4699d15561c62b2e0c9c2 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -32,6 +32,8 @@ static inline void setup_cputime_one_jiffy(void) { }
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
 #ifdef __KERNEL__
 
 /*
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f65bd36345194db88b71bca1f3740cca8744fa33..3001887f94b7a7c31ca01051db3f3cf9643e3ad0 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -18,6 +18,8 @@
 typedef unsigned long long __nocast cputime_t;
 typedef unsigned long long __nocast cputime64_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
 static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
 #ifndef CONFIG_64BIT
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index d5cb78f539861afdc82eae345ee4c8148fb2448e..fe386fc6e85e27f3c4a1b99262fe023db6a31fc3 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -3,6 +3,8 @@
 
 typedef unsigned long __nocast cputime_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
 #define cputime_one_jiffy              jiffies_to_cputime(1)
 #define cputime_to_jiffies(__ct)       (__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct)                (__ct)
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 4e817606c5494126a31ae50e58a6e76242d89f07..0419485891f2a8eb6125f154f52d7b16c5fb05b4 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -21,6 +21,8 @@
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
 #define cputime_one_jiffy              jiffies_to_cputime(1)
 
 #define cputime_div(__ct, divisor)  div_u64((__force u64)__ct, divisor)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 64492dff8a819fc56ac37c656ac6831c1209d58b..8394b1ee600c38ba6e9144a6326369b6ef0cdacd 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -554,6 +554,23 @@ drop_precision:
        return (__force cputime_t) scaled;
 }
 
+/*
+ * Atomically advance counter to the new value. Interrupts, vcpu
+ * scheduling, and scaling inaccuracies can cause cputime_advance
+ * to be occasionally called with a new value smaller than counter.
+ * Let's enforce monotonicity.
+ *
+ * Normally a caller will only go through this loop once, or not
+ * at all in case a previous caller updated counter the same jiffy.
+ */
+static void cputime_advance(cputime_t *counter, cputime_t new)
+{
+       cputime_t old;
+
+       while (new > (old = ACCESS_ONCE(*counter)))
+               cmpxchg_cputime(counter, old, new);
+}
+
 /*
  * Adjust tick based cputime random precision against scheduler
  * runtime accounting.
@@ -599,16 +616,8 @@ static void cputime_adjust(struct task_cputime *curr,
                utime = rtime - stime;
        }
 
-       /*
-        * If the tick based count grows faster than the scheduler one,
-        * the result of the scaling may go backward.
-        * Let's enforce monotonicity.
-        * Atomic exchange protects against concurrent cputime_adjust().
-        */
-       while (stime > (rtime = ACCESS_ONCE(prev->stime)))
-               cmpxchg(&prev->stime, rtime, stime);
-       while (utime > (rtime = ACCESS_ONCE(prev->utime)))
-               cmpxchg(&prev->utime, rtime, utime);
+       cputime_advance(&prev->stime, stime);
+       cputime_advance(&prev->utime, utime);
 
 out:
        *ut = prev->utime;
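
For readers outside the kernel tree, a self-contained userspace sketch
of the cputime_advance() loop above (assumed names; GCC's __atomic
builtins stand in for ACCESS_ONCE() and cmpxchg_cputime()):

	#include <stdint.h>
	#include <stdio.h>

	/* Advance the counter to new, but never move it backwards. */
	static void counter_advance(uint64_t *counter, uint64_t new)
	{
		uint64_t old;

		/*
		 * Retry until the counter already holds a value >= new.
		 * A failed CAS means another thread advanced the counter;
		 * the loop condition then re-reads and re-checks it.
		 */
		while (new > (old = __atomic_load_n(counter, __ATOMIC_RELAXED)))
			__atomic_compare_exchange_n(counter, &old, new, 0,
						    __ATOMIC_RELAXED,
						    __ATOMIC_RELAXED);
	}

	int main(void)
	{
		uint64_t counter = 100;

		counter_advance(&counter, 150);	/* moves forward to 150 */
		counter_advance(&counter, 120);	/* no-op: would go back */
		printf("%llu\n", (unsigned long long)counter);	/* 150 */
		return 0;
	}

On a 32 bit target the 8 byte __atomic operations may need -latomic;
in the kernel, that width mismatch is exactly the gap cmpxchg_cputime()
closes by routing 64 bit cputime_t through cmpxchg64().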