/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed with respect to regular atomic_t
 * functions and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all the prior loads and
 * stores will be issued before the decrement; they also provide a control
 * dependency, which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
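
/*
 * A minimal usage sketch; 'struct foo', foo_lock and foo_find() are
 * hypothetical and not part of this file. The relaxed increment needs no
 * ordering of its own because the lock acquire that made the object
 * reachable already provides it:
 *
 *	struct foo {
 *		refcount_t refs;
 *		struct list_head node;
 *	};
 *
 *	struct foo *foo_find_get(unsigned long key)
 *	{
 *		struct foo *p;
 *
 *		spin_lock(&foo_lock);
 *		p = foo_find(key);		// hypothetical lookup helper
 *		if (p)
 *			refcount_inc(&p->refs);	// ordering comes from the lock
 *		spin_unlock(&foo_lock);
 *		return p;
 *	}
 */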

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;		/* saturate rather than wrap */

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL_GPL(refcount_add_not_zero);
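
/*
 * Illustrative sketch of the add interface ('struct pool' with an embedded
 * refcount_t is hypothetical): take one reference per element of a batch in
 * a single operation, failing if the pool is already dead:
 *
 *	bool pool_get_many(struct pool *p, unsigned int n)
 *	{
 *		return refcount_add_not_zero(n, &p->refs);
 *	}
 */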

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;	/* saturated at UINT_MAX; stay there */

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
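
/*
 * Sketch of the typical lockless lookup pattern ('struct foo' and foo_tree
 * are hypothetical): under rcu_read_lock() the object may be concurrently
 * freed, so only the conditional increment may take the reference:
 *
 *	struct foo *foo_lookup(unsigned long key)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = radix_tree_lookup(&foo_tree, key);
 *		if (p && !refcount_inc_not_zero(&p->refs))
 *			p = NULL;	// lost the race with the final put
 *		rcu_read_unlock();
 *		return p;
 *	}
 */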

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible
 * use-after-free condition.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_inc);

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow, and it will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return false;	/* saturated; never decrement again */

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return !new;
}
EXPORT_SYMBOL_GPL(refcount_sub_and_test);
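
/*
 * Counterpart to the pool_get_many() sketch above (still hypothetical):
 * drop a whole batch of references at once and free the pool if that was
 * the last of them:
 *
 *	void pool_put_many(struct pool *p, unsigned int n)
 *	{
 *		if (refcount_sub_and_test(n, &p->refs))
 *			pool_free(p);	// hypothetical free helper
 *	}
 */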

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL_GPL(refcount_dec_and_test);
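
/*
 * Sketch of the common put path ('struct foo' allocated with kmalloc() is
 * hypothetical): the release ordering makes all prior stores to the object
 * visible before the free, and the control dependency keeps the free after
 * the final 1->0 transition:
 *
 *	void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test(&p->refs))
 *			kfree(p);
 *	}
 */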

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and a
 * control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg helper, because the latter would allow
 * implementing unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}
EXPORT_SYMBOL_GPL(refcount_dec_if_one);
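
/*
 * Sketch of the try-delete use ('struct foo' and foo_lock are hypothetical):
 * delete the object only if we hold the last reference, otherwise leave it
 * in place:
 *
 *	bool foo_try_delete(struct foo *p)
 *	{
 *		lockdep_assert_held(&foo_lock);
 *
 *		if (!refcount_dec_if_one(&p->refs))
 *			return false;	// other references remain
 *
 *		list_del(&p->node);
 *		kfree(p);
 *		return true;
 *	}
 */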

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * This was often open-coded as atomic_add_unless(&var, -1, 1).
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return true;	/* saturated; never decrement again */

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
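
/*
 * Sketch of the intended pattern ('struct foo' on a list protected by a
 * hypothetical foo_mutex): the mutex is only taken for the final reference,
 * when the object must also be unlinked before it is freed:
 *
 *	void foo_release(struct foo *p)
 *	{
 *		if (refcount_dec_and_mutex_lock(&p->refs, &foo_mutex)) {
 *			list_del(&p->node);
 *			mutex_unlock(&foo_mutex);
 *			kfree(p);
 *		}
 *	}
 */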

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_and_lock);