seqlock: Use seqcount infrastructure
author	Thomas Gleixner <tglx@linutronix.de>
Sat, 16 Jul 2011 16:40:26 +0000 (18:40 +0200)
committer	Ingo Molnar <mingo@kernel.org>
Tue, 19 Feb 2013 07:43:34 +0000 (08:43 +0100)
No point in having different implementations for the same
thing. Change the macro mess to inline functions where possible.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
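
[Editor's note: the calling pattern these helpers serve is unchanged by the rework. A minimal usage sketch, not part of the commit; demo_lock, demo_sec and demo_nsec are hypothetical names for illustration:]

	/*
	 * Writer side: write_seqlock() takes the spinlock and makes the
	 * sequence count odd; write_sequnlock() makes it even again.
	 */
	static DEFINE_SEQLOCK(demo_lock);
	static u64 demo_sec, demo_nsec;

	static void demo_update(u64 sec, u64 nsec)
	{
		write_seqlock(&demo_lock);
		demo_sec = sec;
		demo_nsec = nsec;
		write_sequnlock(&demo_lock);
	}

	/*
	 * Reader side: lockless; retries whenever a writer was active
	 * between read_seqbegin() and read_seqretry().
	 */
	static void demo_read(u64 *sec, u64 *nsec)
	{
		unsigned seq;

		do {
			seq = read_seqbegin(&demo_lock);
			*sec = demo_sec;
			*nsec = demo_nsec;
		} while (read_seqretry(&demo_lock, seq));
	}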
include/linux/seqlock.h

index cb0599ca049cadbe3534e96b8671b10dc0e229d1..18299057402f1bf9015cff936f9f2a37c01a5896 100644
 #include <linux/preempt.h>
 #include <asm/processor.h>
 
-typedef struct {
-       unsigned sequence;
-       spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems.  We think these are
- * OK now.  Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-                { 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)                                        \
-       do {                                            \
-               (x)->sequence = 0;                      \
-               spin_lock_init(&(x)->lock);             \
-       } while (0)
-
-#define DEFINE_SEQLOCK(x) \
-               seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-       spin_lock(&sl->lock);
-       ++sl->sequence;
-       smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-       smp_wmb();
-       sl->sequence++;
-       spin_unlock(&sl->lock);
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-       unsigned ret;
-
-repeat:
-       ret = ACCESS_ONCE(sl->sequence);
-       if (unlikely(ret & 1)) {
-               cpu_relax();
-               goto repeat;
-       }
-       smp_rmb();
-
-       return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-       smp_rmb();
-
-       return unlikely(sl->sequence != start);
-}
-
-
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
  * after the write_seqcount_end().
  */
-
 typedef struct seqcount {
        unsigned sequence;
 } seqcount_t;
@@ -207,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
        smp_rmb();
-
        return __read_seqcount_retry(s, start);
 }
 
@@ -241,21 +171,101 @@ static inline void write_seqcount_barrier(seqcount_t *s)
        s->sequence += 2;
 }
 
+typedef struct {
+       struct seqcount seqcount;
+       spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
+ */
+#define __SEQLOCK_UNLOCKED(lockname)                   \
+       {                                               \
+               .seqcount = SEQCNT_ZERO,                \
+               .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
+       }
+
+#define seqlock_init(x)                                        \
+       do {                                            \
+               seqcount_init(&(x)->seqcount);          \
+               spin_lock_init(&(x)->lock);             \
+       } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+               seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+       return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+       return read_seqcount_retry(&sl->seqcount, start);
+}
+
 /*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
  */
+static inline void write_seqlock(seqlock_t *sl)
+{
+       spin_lock(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock(&sl->lock);
+}
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+       spin_lock_bh(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+       spin_lock_irq(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sl->lock, flags);
+       write_seqcount_begin(&sl->seqcount);
+       return flags;
+}
+
 #define write_seqlock_irqsave(lock, flags)                             \
-       do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)                                                \
-       do { local_irq_disable();   write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)                                         \
-        do { local_bh_disable();    write_seqlock(lock); } while (0)
+       do { flags = __write_seqlock_irqsave(lock); } while (0)
 
-#define write_sequnlock_irqrestore(lock, flags)                                \
-       do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)                                      \
-       do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)                                       \
-       do { write_sequnlock(lock); local_bh_enable(); } while(0)
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_irqrestore(&sl->lock, flags);
+}
 
 #endif /* __LINUX_SEQLOCK_H */
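
[Editor's note: as the comment on the seqcount-only variant says, the writer must bring its own serialization. A sketch of that pattern, assuming a caller-provided mutex; all demo_* names are hypothetical and <linux/mutex.h> supplies DEFINE_MUTEX:]

	/*
	 * seqcount_t writer serialized by the caller's own mutex; the
	 * seqcount only versions the data for the lockless readers.
	 */
	static DEFINE_MUTEX(demo_mutex);	/* hypothetical external writer lock */
	static seqcount_t demo_seq = SEQCNT_ZERO;
	static unsigned long demo_val;

	static void demo_store(unsigned long v)
	{
		mutex_lock(&demo_mutex);
		write_seqcount_begin(&demo_seq);
		demo_val = v;
		write_seqcount_end(&demo_seq);
		mutex_unlock(&demo_mutex);
	}

	static unsigned long demo_load(void)
	{
		unsigned seq;
		unsigned long v;

		do {
			seq = read_seqcount_begin(&demo_seq);
			v = demo_val;
		} while (read_seqcount_retry(&demo_seq, seq));
		return v;
	}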