#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed w.r.t. regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */

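/*
 * Illustrative sketch, not part of this header: a typical get/put pair for
 * a hypothetical 'struct obj' embedding a refcount_t. Whatever handed out
 * the 'o' pointer (a lock, RCU) provides the ordering for the relaxed
 * increment, and the release ordering of refcount_dec_and_test() ensures
 * all prior accesses to the object happen before kfree():
 *
 *	struct obj {
 *		refcount_t ref;
 *		struct list_head node;
 *	};
 *
 *	static struct obj *obj_get(struct obj *o)
 *	{
 *		refcount_inc(&o->ref);
 *		return o;
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->ref))
 *			kfree(o);
 *	}
 */
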
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#ifdef CONFIG_DEBUG_REFCOUNT
#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
#define __refcount_check	__must_check
#else
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#endif

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

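/*
 * Usage sketch (hypothetical 'struct obj' as above): a refcount is normally
 * initialized to 1 for the reference held by the creator, either statically
 * with REFCOUNT_INIT() or at runtime with refcount_set():
 *
 *	static struct obj boot_obj = { .ref = REFCOUNT_INIT(1) };
 *
 *	static void obj_init(struct obj *o)
 *	{
 *		refcount_set(&o->ref, 1);
 *	}
 */
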
static inline __refcount_check
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (!val)
			return false;

		/* already saturated; stay there */
		if (unlikely(val == UINT_MAX))
			return true;

		/* on overflow, saturate at UINT_MAX */
		new = val + i;
		if (new < val)
			new = UINT_MAX;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}

static inline void refcount_add(unsigned int i, refcount_t *r)
{
	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}

/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;

		/* val == UINT_MAX wraps new to 0: already saturated, stay there */
		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}

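/*
 * Sketch of the intended lockless usage (hypothetical RCU-managed
 * 'obj_table' of 'struct obj' pointers): RCU keeps the object memory
 * stable, the dependent load of rcu_dereference() orders the reads, and
 * the control dependency of refcount_inc_not_zero() guarantees we never
 * store to an object we failed to get a reference on:
 *
 *	static struct obj *obj_lookup(unsigned int idx)
 *	{
 *		struct obj *o;
 *
 *		rcu_read_lock();
 *		o = rcu_dereference(obj_table[idx]);
 *		if (o && !refcount_inc_not_zero(&o->ref))
 *			o = NULL;
 *		rcu_read_unlock();
 *
 *		return o;
 *	}
 */
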
/*
 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object, and will WARN when this is not so.
 */
static inline void refcount_inc(refcount_t *r)
{
	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}

/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* saturated; refuse to decrement */
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	/* true if we did the final decrement to 0 */
	return !new;
}

static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}

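/*
 * Sketch (hypothetical): refcount_sub_and_test() covers the rarer case of
 * dropping several references at once, e.g. when a batch of 'n' users is
 * torn down in one go:
 *
 *	static void obj_put_many(struct obj *o, unsigned int n)
 *	{
 *		if (refcount_sub_and_test(n, &o->ref))
 *			kfree(o);
 *	}
 */
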
/*
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline
void refcount_dec(refcount_t *r)
{
	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}

/*
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg(), because the latter would allow implementing
 * unsafe operations.
 */
static inline __refcount_check
bool refcount_dec_if_one(refcount_t *r)
{
	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}

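/*
 * Sketch of the try-delete usage (hypothetical 'struct obj' on a list):
 * the object is only unhooked and freed if we held the last reference;
 * otherwise it stays in place for the remaining users:
 *
 *	static bool obj_try_delete(struct obj *o)
 *	{
 *		if (!refcount_dec_if_one(&o->ref))
 *			return false;
 *
 *		list_del(&o->node);
 *		kfree(o);
 *		return true;
 *	}
 */
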
/*
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 */
static inline __refcount_check
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* saturated; pretend the decrement happened */
		if (unlikely(val == UINT_MAX))
			return true;

		/* refuse the final 1 -> 0 transition */
		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return true;
}

/*
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}

/*
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}

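/*
 * Sketch of the classic usage (hypothetical 'obj_lock' spinlock protecting
 * the lookup structure): the lock is only taken for the final 1 -> 0
 * transition, so the common put path stays lock-free; the mutex variant
 * above works the same way:
 *
 *	static void obj_release(struct obj *o)
 *	{
 *		if (refcount_dec_and_lock(&o->ref, &obj_lock)) {
 *			list_del(&o->node);
 *			spin_unlock(&obj_lock);
 *			kfree(o);
 *		}
 *	}
 */
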
#endif /* _LINUX_REFCOUNT_H */