git.karo-electronics.de Git - linux-beck.git/blobdiff - arch/powerpc/lib/locks.c
locking: Convert raw_rwlock to arch_rwlock
[linux-beck.git] / arch / powerpc / lib / locks.c
index 077bed7dc52b3726ae4ebd7ecb227832fd52de95..58e14fba11b1d7472b982a9a91a992a20319c9cd 100644 (file)
@@ -23,8 +23,9 @@
 #include <asm/hvcall.h>
 #include <asm/iseries/hv_call.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
 
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
 
@@ -39,12 +40,13 @@ void __spin_yield(raw_spinlock_t *lock)
        rmb();
        if (lock->slock != lock_value)
                return;         /* something has changed */
-#ifdef CONFIG_PPC_ISERIES
-       HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
-               ((u64)holder_cpu << 32) | yield_count);
-#else
-       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
-                          yield_count);
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+                       ((u64)holder_cpu << 32) | yield_count);
+#ifdef CONFIG_PPC_SPLPAR
+       else
+               plpar_hcall_norets(H_CONFER,
+                       get_hard_smp_processor_id(holder_cpu), yield_count);
 #endif
 }
 
@@ -53,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
 {
        int lock_value;
        unsigned int holder_cpu, yield_count;
@@ -69,17 +71,18 @@ void __rw_yield(raw_rwlock_t *rw)
        rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
-#ifdef CONFIG_PPC_ISERIES
-       HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
-               ((u64)holder_cpu << 32) | yield_count);
-#else
-       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
-                          yield_count);
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+                       ((u64)holder_cpu << 32) | yield_count);
+#ifdef CONFIG_PPC_SPLPAR
+       else
+               plpar_hcall_norets(H_CONFER,
+                       get_hard_smp_processor_id(holder_cpu), yield_count);
 #endif
 }
 #endif
 
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (lock->slock) {
                HMT_low();
@@ -89,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
        HMT_medium();
 }
 
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);