1 /* Atomic operations usable in machine independent code */
2 #ifndef _LINUX_ATOMIC_H
3 #define _LINUX_ATOMIC_H
4 #include <asm/atomic.h>
5 #include <asm/barrier.h>
7 #ifndef atomic_read_ctrl
8 static inline int atomic_read_ctrl(const atomic_t *v)
10 int val = atomic_read(v);
11 smp_read_barrier_depends(); /* Enforce control dependency. */
/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
/* Acquire-ordered read: pairs with a _release store to the same atomic_t. */
#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif
/* Release-ordered store: makes prior writes visible before the set. */
#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
/* Run the relaxed op, then fence so later accesses cannot hoist above it. */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})
/* Fence first so earlier accesses cannot sink below the relaxed op. */
#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})
/* Full fences on both sides of the relaxed op: fully ordered variant. */
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
/* Acquire-ordered 64-bit read: pairs with atomic64_set_release(). */
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif
/* Release-ordered 64-bit store: makes prior writes visible before the set. */
#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
444 static inline int atomic_add_unless(atomic_t *v, int a, int u)
446 return __atomic_add_unless(v, a, u) != u;
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
460 #ifndef atomic_andnot
461 static inline void atomic_andnot(int i, atomic_t *v)
467 static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
469 atomic_andnot(mask, v);
472 static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of probable
 * value of the atomic. This helps processor to not read the memory
 * before doing the atomic read/modify/write cycle, lowering
 * number of bus transactions on some arches.
 *
 * Returns: 0 if increment was not done, 1 otherwise.
 */
489 #ifndef atomic_inc_not_zero_hint
490 static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
494 /* sanity test, should be removed by compiler if hint is a constant */
496 return atomic_inc_not_zero(v);
499 val = atomic_cmpxchg(v, c, c + 1);
509 #ifndef atomic_inc_unless_negative
510 static inline int atomic_inc_unless_negative(atomic_t *p)
513 for (v = 0; v >= 0; v = v1) {
514 v1 = atomic_cmpxchg(p, v, v + 1);
522 #ifndef atomic_dec_unless_positive
523 static inline int atomic_dec_unless_positive(atomic_t *p)
526 for (v = 0; v <= 0; v = v1) {
527 v1 = atomic_cmpxchg(p, v, v - 1);
/**
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
542 #ifndef atomic_dec_if_positive
543 static inline int atomic_dec_if_positive(atomic_t *v)
549 if (unlikely(dec < 0))
551 old = atomic_cmpxchg((v), c, dec);
552 if (likely(old == c))
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
564 #ifndef atomic64_read_ctrl
565 static inline long long atomic64_read_ctrl(const atomic64_t *v)
567 long long val = atomic64_read(v);
568 smp_read_barrier_depends(); /* Enforce control dependency. */
573 #ifndef atomic64_andnot
574 static inline void atomic64_andnot(long long i, atomic64_t *v)
580 #include <asm-generic/atomic-long.h>
582 #endif /* _LINUX_ATOMIC_H */