#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#if defined(CONFIG_SMP)
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                 struct lock_class_key *key);

# define raw_spin_lock_init(lock)                       \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __raw_spin_lock_init((lock), #lock, &__key);    \
} while (0)

#else
# define raw_spin_lock_init(lock)                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
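
/*
 * Example: run-time initialization of a raw spinlock embedded in a
 * structure (illustrative sketch; "struct my_dev" and my_dev_setup()
 * are hypothetical, not part of this API):
 *
 *      struct my_dev {
 *              raw_spinlock_t  lock;
 *      };
 *
 *      static void my_dev_setup(struct my_dev *dev)
 *      {
 *              raw_spin_lock_init(&dev->lock);
 *      }
 *
 * A statically allocated lock would use DEFINE_RAW_SPINLOCK() instead.
 */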

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock)     ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
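
/*
 * Illustrative sketch of why smp_mb__after_lock() exists ("my_lock" and
 * "cond" are hypothetical): taking a spinlock is only an acquire
 * barrier, so a load inside the critical section may be reordered with
 * a store preceding the lock unless a full barrier is inserted:
 *
 *      spin_lock(&my_lock);
 *      smp_mb__after_lock();   // upgrade the acquire to a full barrier
 *      if (cond)
 *              do_work();
 *
 * Architectures whose lock operation is already a full barrier define
 * ARCH_HAS_SMP_MB_AFTER_LOCK and provide a cheaper version.
 */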

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)
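
/*
 * Illustrative (and deliberately simplified) teardown pattern for
 * raw_spin_unlock_wait(); "my_obj", its fields and free_my_obj() are
 * hypothetical. The lock is never acquired here, so this is only safe
 * once new acquisitions have been excluded by other means:
 *
 *      my_obj->dying = true;                   // stop new lockers
 *      smp_mb();                               // order flag vs. the wait
 *      raw_spin_unlock_wait(&my_obj->lock);    // drain current holder
 *      free_my_obj(my_obj);
 */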

#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
# define raw_spin_lock_nested(lock, subclass)           _raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif
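
/*
 * Example: taking two locks of the same lock class, which plain
 * raw_spin_lock() would make lockdep flag as a potential deadlock
 * (illustrative sketch; "a" and "b" are hypothetical objects the
 * caller has ordered, e.g. by address):
 *
 *      raw_spin_lock(&a->lock);
 *      raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      raw_spin_unlock(&b->lock);
 *      raw_spin_unlock(&a->lock);
 *
 * The subclass (see SINGLE_DEPTH_NESTING in linux/lockdep.h) only
 * affects lockdep's bookkeeping, never the locking behaviour itself.
 */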

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
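
/*
 * Example: opportunistic locking from a context that must not spin
 * with interrupts disabled (illustrative; "my_lock", do_work_locked()
 * and defer_work() are hypothetical):
 *
 *      unsigned long flags;
 *
 *      if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *              do_work_locked();
 *              raw_spin_unlock_irqrestore(&my_lock, flags);
 *      } else {
 *              defer_work();   // contended; retry later
 *      }
 */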

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)
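
/*
 * Example: the two usual ways a spinlock is initialized (illustrative;
 * "my_lock", "struct foo" and foo_init() are hypothetical):
 *
 *      static DEFINE_SPINLOCK(my_lock);        // static initializer
 *
 *      struct foo {
 *              spinlock_t      lock;
 *      };
 *
 *      void foo_init(struct foo *f)
 *      {
 *              spin_lock_init(&f->lock);       // run-time initializer
 *      }
 */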

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
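
/*
 * Example: the canonical irqsave/irqrestore pairing for data shared
 * with an interrupt handler (illustrative; "my_lock" and "counter"
 * are hypothetical):
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&my_lock, flags);     // also disables local IRQs
 *      counter++;                              // critical section
 *      spin_unlock_irqrestore(&my_lock, flags); // restores prior IRQ state
 *
 * spin_lock_irq()/spin_unlock_irq() are the cheaper variants for
 * callers that know interrupts were enabled on entry.
 */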

static inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

static inline void assert_spin_locked(spinlock_t *lock)
{
        assert_raw_spin_locked(&lock->rlock);
}
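
/*
 * Example: documenting and enforcing a locking precondition
 * (illustrative; "struct foo" and update_stats() are hypothetical):
 *
 *      // Caller must hold f->lock.
 *      static void update_stats(struct foo *f)
 *      {
 *              assert_spin_locked(&f->lock);   // BUG() if not held
 *              f->nr_updates++;
 *      }
 */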

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
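
/*
 * Example: the refcount-release pattern this helper exists for
 * (illustrative sketch; "struct foo", foo_list_lock, foo_destroy()
 * and the list linkage are hypothetical). The lock is taken atomically
 * with the 1->0 transition, so no other CPU can look the object up
 * while it is being torn down:
 *
 *      void foo_put(struct foo *f)
 *      {
 *              if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
 *                      list_del(&f->node);
 *                      spin_unlock(&foo_list_lock);
 *                      foo_destroy(f);
 *              }
 *      }
 */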

#endif /* __LINUX_SPINLOCK_H */