/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * struct refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
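
/*
 * Example (illustrative only; 'struct foo' is a made-up type): a refcount_t
 * is meant to be embedded in the object whose lifetime it controls, and can
 * be statically initialized with REFCOUNT_INIT():
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	static struct foo default_foo = {
 *		.ref = REFCOUNT_INIT(1),
 *	};
 */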

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
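
/*
 * Example (illustrative; 'struct foo' and foo_alloc() are hypothetical):
 * refcount_set() is typically used exactly once, right after allocation and
 * before the object is visible to anyone else. refcount_read() is only
 * meaningful for debugging, since the value may change concurrently:
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (p)
 *			refcount_set(&p->ref, 1);
 *		return p;
 *	}
 */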

#ifdef CONFIG_REFCOUNT_FULL
#include <linux/bug.h>

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                 INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, &r->refs);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object. Given the precise timing details involved with the
 * round-robin scheduling of each thread manipulating the refcount and the
 * need to hit the race multiple times in succession, there doesn't appear to
 * be a practical avenue of attack even if using refcount_add() operations
 * with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing
 * the reference count on will provide the ordering. For locked data
 * structures, it's the lock acquire; for RCU/lockless data structures, it's
 * the dependent load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads
 * and stores will be issued before; they also provide a control dependency
 * which orders us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 */
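
/*
 * To make the rules above concrete, a hedged sketch (the object, its 'ref'
 * field and the 'table' it lives in are hypothetical): in a lockless lookup,
 * RCU's dependent load provides the ordering that the relaxed increment does
 * not, and the control dependency of refcount_inc_not_zero() ensures we never
 * store to an object whose reference we failed to take:
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(table[i]);
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;
 *	rcu_read_unlock();
 */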

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}
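
/*
 * Example (illustrative; 'obj' and the surrounding function are made up):
 * a producer handing out a batch of 'n' references in one go, e.g. when
 * queueing 'n' pieces of work that each drop a reference on completion:
 *
 *	if (!refcount_add_not_zero(n, &obj->ref))
 *		return -ENOENT;	// object is already dying
 *
 * refcount_add() below is the unconditional variant for when the caller
 * already holds a reference.
 */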

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}
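
/*
 * Example (illustrative; 'dev_lock', 'dev_list' and the object layout are
 * made up): the classic lookup pattern, where the list lock keeps the object
 * memory stable for the duration of the search and the result is only handed
 * out if a reference could still be taken:
 *
 *	spin_lock(&dev_lock);
 *	list_for_each_entry(dev, &dev_list, node) {
 *		if (dev->id == id && refcount_inc_not_zero(&dev->ref)) {
 *			found = dev;
 *			break;
 *		}
 *	}
 *	spin_unlock(&dev_lock);
 */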

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible
 * use-after-free condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	refcount_add(1, r);
}
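
/*
 * Example (illustrative; 'wq' and 'obj' are hypothetical): since the caller
 * must already hold a reference, refcount_inc() is the right tool when
 * handing the object to a second user, e.g. before queueing asynchronous
 * work on it:
 *
 *	refcount_inc(&obj->ref);	// reference for the worker
 *	queue_work(wq, &obj->work);
 */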

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}
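
/*
 * Example (illustrative; 'obj' is hypothetical): dropping a batch of 'n'
 * references, such as one taken with refcount_add_not_zero() above, in a
 * single operation, and freeing the object if they were the last:
 *
 *	if (refcount_sub_and_test(n, &obj->ref))
 *		kfree(obj);
 */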

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
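
/*
 * Example (illustrative; foo_release() is a hypothetical destructor): the
 * canonical put function. The release ordering makes all prior stores to the
 * object visible before the count drops, and the acquire ordering on the
 * final decrement makes it safe for foo_release() to tear the object down:
 *
 *	void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test(&p->ref))
 *			foo_release(p);
 *	}
 */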

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
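
/*
 * Example (illustrative; 'child' is hypothetical): refcount_dec() is for the
 * case where the caller knows this cannot be the final reference, e.g.
 * because a longer-lived parent structure is known to hold one of its own:
 *
 *	refcount_dec(&child->ref);	// parent still holds a reference
 */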

#else /* CONFIG_REFCOUNT_FULL */

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

# ifdef CONFIG_ARCH_HAS_REFCOUNT
#  include <asm/refcount.h>
# else
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

static inline void refcount_add(int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* !CONFIG_REFCOUNT_FULL */

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
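
/*
 * Example (illustrative; 'foo_mutex' protects a hypothetical lookup structure
 * the object is linked into): refcount_dec_and_mutex_lock() only takes the
 * lock when the count actually hits zero, so the object can be unlinked and
 * freed without paying for the lock on every put:
 *
 *	if (refcount_dec_and_mutex_lock(&p->ref, &foo_mutex)) {
 *		list_del(&p->node);
 *		mutex_unlock(&foo_mutex);
 *		kfree(p);
 *	}
 */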

#endif /* _LINUX_REFCOUNT_H */