Merge remote-tracking branch 'nfsd/nfsd-next'
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c70d6d72b9aa5f108470dd890752cd..af6e95d0bed6122bf9fd1e4af2bd76a4e2be62b3 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -3,6 +3,22 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
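
Both fallbacks above are no-ops unless an architecture opts in. As a rough
sketch of what the opt-in looks like (illustrative, not the verbatim arch
headers): a weakly-ordered architecture such as arm64 can map the relaxed
variant onto its barrier-less local cmpxchg, while an architecture with an
expensive cpu_relax(), such as s390, can weaken the spin-wait hint to a
plain compiler barrier:

	/* e.g. arch/arm64/include/asm/cmpxchg.h (sketch) */
	#define cmpxchg64_relaxed(ptr, o, n)	cmpxchg_local((ptr), (o), (n))

	/* e.g. arch/s390/include/asm/mutex.h (sketch) */
	#define arch_mutex_cpu_relax()	barrier()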
@@ -14,12 +30,13 @@
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
-               old.lock_count = cmpxchg64(&lockref->lock_count,                \
-                                          old.lock_count, new.lock_count);     \
+               old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
+                                                  old.lock_count,              \
+                                                  new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
-               cpu_relax();                                                    \
+               arch_mutex_cpu_relax();                                         \
        }                                                                       \
 } while (0)
 
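Taken together, the two hunks above keep the lockless fast path intact but
drop the implied full barrier and allow a cheaper spin-wait. A sketch of how
the loop reads once CMPXCHG_LOOP(new.count++;, return;) is expanded for
lockref_get() (simplified from lib/lockref.c; the BUILD_BUG_ON and line
continuations are omitted):

	struct lockref old;
	old.lock_count = ACCESS_ONCE(lockref->lock_count);
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
		struct lockref new = old, prev = old;
		new.count++;					/* CODE */
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
						   old.lock_count,
						   new.lock_count);
		if (likely(old.lock_count == prev.lock_count))
			return;					/* SUCCESS */
		arch_mutex_cpu_relax();				/* was cpu_relax() */
	}
	/* slow path: take lockref->lock and update under it */

A failed cmpxchg64_relaxed() returns the current value, so "old" is reloaded
for the next iteration; that is what the "Note that ..." comment above the
macro refers to.
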
@@ -136,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref)
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
 }
+EXPORT_SYMBOL(lockref_mark_dead);
 
 /**
  * lockref_get_not_dead - Increments count unless the ref is dead
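
With lockref_mark_dead() now exported, modules can kill a lockref and rely on
lockref_get_not_dead() failing afterwards. A minimal usage sketch, assuming a
hypothetical object "obj" with an embedded struct lockref member "ref"
(neither name comes from lib/lockref.c); the spinlock must be held when
marking, as the assert_spin_locked() above enforces:

	spin_lock(&obj->ref.lock);
	lockref_mark_dead(&obj->ref);		/* count becomes -128 */
	spin_unlock(&obj->ref.lock);

	/* concurrent lookups can no longer take a new reference */
	if (!lockref_get_not_dead(&obj->ref))
		pr_debug("object already dead\n");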