[PATCH] i386: Clean up spin/rwlocks
author    Andi Kleen <ak@suse.de>
          Tue, 26 Sep 2006 08:52:32 +0000 (10:52 +0200)
committer Andi Kleen <andi@basil.nowhere.org>
          Tue, 26 Sep 2006 08:52:32 +0000 (10:52 +0200)
- Inline spinlock strings into their inline functions
- Convert macros to typesafe inlines
- Replace some leftover __asm__ __volatile__s with asm volatile

Signed-off-by: Andi Kleen <ak@suse.de>
include/asm-i386/rwlock.h
include/asm-i386/spinlock.h
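
For context (illustration only, not part of the patch): the "typesafe inlines" bullet refers to replacing function-like macros with static inline functions, so the compiler checks the argument's type and evaluates it exactly once. A minimal sketch of the pattern, with hypothetical my_* names:

/* Illustration only -- hypothetical names, not from the patch. */
typedef struct { volatile signed char slock; } my_lock_t;

/* Macro form: compiles against anything that has an slock member, and
 * the argument x is textually substituted (risking double evaluation). */
#define my_is_locked(x)	((x)->slock <= 0)

/* Typesafe inline form: callers must pass a my_lock_t *, the argument
 * is evaluated once, and type errors are diagnosed at compile time. */
static inline int my_is_locked_fn(my_lock_t *x)
{
	return x->slock <= 0;
}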

diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index f40ccbd8cb7fc690411994ae138a3d09d9b570a3..c3e5db32fa48aca7a1c3d6edaf233a3b432c5922 100644
 #define RW_LOCK_BIAS            0x01000000
 #define RW_LOCK_BIAS_STR       "0x01000000"
 
-#define __build_read_lock(rw, helper)   \
-       asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \
-                       "jns 1f\n" \
-                       "call " helper "\n\t" \
-                       "1:\n" \
-                       ::"a" (rw) : "memory")
-
-#define __build_write_lock(rw, helper) \
-       asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                       "jz 1f\n" \
-                       "call " helper "\n\t" \
-                       "1:\n" \
-                       ::"a" (rw) : "memory")
+/* Code is in asm-i386/spinlock.h */
 
 #endif
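
For orientation before the spinlock.h diff (illustration only, not part of the patch): the rwlock word starts at RW_LOCK_BIAS. A reader subtracts 1 and holds the lock if the result is still non-negative (the "jns" in __raw_read_lock below); a writer subtracts the whole bias and holds the lock only if the result is exactly zero (the "jz" in __raw_write_lock). A minimal, deliberately non-atomic C model of that arithmetic, with hypothetical model_* names:

#include <stdbool.h>

#define MODEL_RW_LOCK_BIAS 0x01000000

/* Single-threaded model only: the real code does the subtract-and-test
 * atomically with LOCK_PREFIX, which this sketch does not attempt. */
static bool model_read_trylock(int *lock)
{
	/* A reader takes one unit; it succeeds iff no writer holds the
	 * lock, i.e. the count stays non-negative. */
	if (*lock - 1 >= 0) {
		*lock -= 1;
		return true;
	}
	return false;
}

static bool model_write_trylock(int *lock)
{
	/* A writer takes the whole bias; it succeeds iff the result is
	 * exactly zero, meaning no readers and no writer were present. */
	if (*lock - MODEL_RW_LOCK_BIAS == 0) {
		*lock = 0;
		return true;
	}
	return false;
}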
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d1020363c41ab744cebcb2ae4896849fbe10c6b2..324329313af8db5b4ea9b235d191307f4e4d7ce2 100644
@@ -4,6 +4,7 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <linux/compiler.h>
 
 /*
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-               (*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-       "\n1:\t" \
-       LOCK_PREFIX " ; decb %0\n\t" \
-       "jns 3f\n" \
-       "2:\t" \
-       "rep;nop\n\t" \
-       "cmpb $0,%0\n\t" \
-       "jle 2b\n\t" \
-       "jmp 1b\n" \
-       "3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
-       "\n1:\t" \
-       LOCK_PREFIX " ; decb %0\n\t" \
-       "jns 5f\n" \
-       "2:\t" \
-       "testl $0x200, %1\n\t" \
-       "jz 4f\n\t" \
-       "sti\n" \
-       "3:\t" \
-       "rep;nop\n\t" \
-       "cmpb $0, %0\n\t" \
-       "jle 3b\n\t" \
-       "cli\n\t" \
-       "jmp 1b\n" \
-       "4:\t" \
-       "rep;nop\n\t" \
-       "cmpb $0, %0\n\t" \
-       "jg 1b\n\t" \
-       "jmp 4b\n" \
-       "5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+       return *(volatile signed char *)(&(x)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-       asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+       asm volatile("\n1:\t"
+                    LOCK_PREFIX " ; decb %0\n\t"
+                    "jns 3f\n"
+                    "2:\t"
+                    "rep;nop\n\t"
+                    "cmpb $0,%0\n\t"
+                    "jle 2b\n\t"
+                    "jmp 1b\n"
+                    "3:\n\t"
+                    : "+m" (lock->slock) : : "memory");
 }
 
 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-       asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+       asm volatile(
+               "\n1:\t"
+               LOCK_PREFIX " ; decb %0\n\t"
+               "jns 5f\n"
+               "2:\t"
+               "testl $0x200, %1\n\t"
+               "jz 4f\n\t"
+               "sti\n"
+               "3:\t"
+               "rep;nop\n\t"
+               "cmpb $0, %0\n\t"
+               "jle 3b\n\t"
+               "cli\n\t"
+               "jmp 1b\n"
+               "4:\t"
+               "rep;nop\n\t"
+               "cmpb $0, %0\n\t"
+               "jg 1b\n\t"
+               "jmp 4b\n"
+               "5:\n\t"
+               : "+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        char oldval;
-       __asm__ __volatile__(
+       asm volatile(
                "xchgb %b0,%1"
                :"=q" (oldval), "+m" (lock->slock)
                :"0" (0) : "memory");
@@ -93,38 +91,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define __raw_spin_unlock_string \
-       "movb $1,%0" \
-               :"+m" (lock->slock) : : "memory"
-
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__(
-               __raw_spin_unlock_string
-       );
+       asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
 }
 
 #else
 
-#define __raw_spin_unlock_string \
-       "xchgb %b0, %1" \
-               :"=q" (oldval), "+m" (lock->slock) \
-               :"0" (oldval) : "memory"
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        char oldval = 1;
 
-       __asm__ __volatile__(
-               __raw_spin_unlock_string
-       );
+       asm volatile("xchgb %b0, %1"
+                    : "=q" (oldval), "+m" (lock->slock)
+                    : "0" (oldval) : "memory");
 }
 
 #endif
 
-#define __raw_spin_unlock_wait(lock) \
-       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+       while (__raw_spin_is_locked(lock))
+               cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -151,22 +140,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)         ((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+       return (int)(x)->lock > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)                ((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+       return (x)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-       __build_read_lock(rw, "__read_lock_failed");
+       asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+                    "jns 1f\n"
+                    "call __read_lock_failed\n\t"
+                    "1:\n"
+                    ::"a" (rw) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-       __build_write_lock(rw, "__write_lock_failed");
+       asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+                    "jz 1f\n"
+                    "call __write_lock_failed\n\t"
+                    "1:\n"
+                    ::"a" (rw) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)