/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The least significant 2 bits of the owner value have the following
 * meanings:
 *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
 *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
 *    i.e. the owner(s) cannot be readily determined. It can be reader
 *    owned or the owning writer is indeterminate.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with both the RWSEM_READER_OWNED and
 * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will
 * largely be left untouched. So for a free or reader-owned rwsem,
 * the owner value may contain information about the last reader that
 * acquired the rwsem. The anonymous bit is set because that particular
 * reader may or may not still own the lock.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
/* Owner-field flag bits (stored in the low 2 bits of sem->owner). */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)

/* Sanity checks compiled in only with CONFIG_DEBUG_RWSEMS; no-op otherwise. */
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c)	DEBUG_LOCKS_WARN_ON(c)
#else
# define DEBUG_RWSEMS_WARN_ON(c)
#endif
/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 *
 * The count word packs the number of active lockers into the low bits
 * (RWSEM_ACTIVE_MASK) and uses the waiting bias to flag queued waiters.
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
55 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
57 * All writes to owner are protected by WRITE_ONCE() to make sure that
58 * store tearing can't happen as optimistic spinners may read and use
59 * the owner value concurrently without lock. Read from owner, however,
60 * may not need READ_ONCE() as long as the pointer value is only used
61 * for comparison and isn't being dereferenced.
63 static inline void rwsem_set_owner(struct rw_semaphore *sem)
65 WRITE_ONCE(sem->owner, current);
68 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
70 WRITE_ONCE(sem->owner, NULL);
74 * The task_struct pointer of the last owning reader will be left in
77 * Note that the owner value just indicates the task has owned the rwsem
78 * previously, it may not be the real owner or one of the real owners
79 * anymore when that field is examined, so take it with a grain of salt.
81 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
82 struct task_struct *owner)
84 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
85 | RWSEM_ANONYMOUSLY_OWNED;
87 WRITE_ONCE(sem->owner, (struct task_struct *)val);
90 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
92 __rwsem_set_reader_owned(sem, current);
96 * Return true if the a rwsem waiter can spin on the rwsem's owner
97 * and steal the lock, i.e. the lock is not anonymously owned.
98 * N.B. !owner is considered spinnable.
100 static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
102 return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
106 * Return true if rwsem is owned by an anonymous writer or readers.
108 static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
110 return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in owner of a reader-owned rwsem, it will be the
 * real owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
#define rwsem_clear_reader_owned rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
						   | RWSEM_ANONYMOUSLY_OWNED;
	/*
	 * Only clear if this task is the one recorded in the owner field;
	 * a relaxed cmpxchg suffices since this is debug bookkeeping only.
	 */
	if (READ_ONCE(sem->owner) == (struct task_struct *)val)
		cmpxchg_relaxed((unsigned long *)&sem->owner, val,
				RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
}
#endif
132 static inline void rwsem_set_owner(struct rw_semaphore *sem)
136 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
140 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
141 struct task_struct *owner)
145 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
150 #ifndef rwsem_clear_reader_owned
151 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
/*
 * Slow-path entry points, implemented outside this header.  The inline
 * fast paths below fall back to these when the atomic count manipulation
 * indicates contention.  The *_killable() variants may return an ERR_PTR
 * value, which the callers below test with IS_ERR().
 */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
166 static inline void __down_read(struct rw_semaphore *sem)
168 if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
169 rwsem_down_read_failed(sem);
170 DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
171 RWSEM_READER_OWNED));
173 rwsem_set_reader_owned(sem);
177 static inline int __down_read_killable(struct rw_semaphore *sem)
179 if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
180 if (IS_ERR(rwsem_down_read_failed_killable(sem)))
182 DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
183 RWSEM_READER_OWNED));
185 rwsem_set_reader_owned(sem);
190 static inline int __down_read_trylock(struct rw_semaphore *sem)
193 * Optimize for the case when the rwsem is not locked at all.
195 long tmp = RWSEM_UNLOCKED_VALUE;
198 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
199 tmp + RWSEM_ACTIVE_READ_BIAS)) {
200 rwsem_set_reader_owned(sem);
210 static inline void __down_write(struct rw_semaphore *sem)
214 tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
216 if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
217 rwsem_down_write_failed(sem);
218 rwsem_set_owner(sem);
221 static inline int __down_write_killable(struct rw_semaphore *sem)
225 tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
227 if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
228 if (IS_ERR(rwsem_down_write_failed_killable(sem)))
230 rwsem_set_owner(sem);
234 static inline int __down_write_trylock(struct rw_semaphore *sem)
238 tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
239 RWSEM_ACTIVE_WRITE_BIAS);
240 if (tmp == RWSEM_UNLOCKED_VALUE) {
241 rwsem_set_owner(sem);
248 * unlock after reading
250 static inline void __up_read(struct rw_semaphore *sem)
254 DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
255 rwsem_clear_reader_owned(sem);
256 tmp = atomic_long_dec_return_release(&sem->count);
257 if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
262 * unlock after writing
264 static inline void __up_write(struct rw_semaphore *sem)
266 DEBUG_RWSEMS_WARN_ON(sem->owner != current);
267 rwsem_clear_owner(sem);
268 if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
274 * downgrade write lock to read lock
276 static inline void __downgrade_write(struct rw_semaphore *sem)
281 * When downgrading from exclusive to shared ownership,
282 * anything inside the write-locked region cannot leak
283 * into the read side. In contrast, anything in the
284 * read-locked region is ok to be re-ordered into the
285 * write side. As such, rely on RELEASE semantics.
287 DEBUG_RWSEMS_WARN_ON(sem->owner != current);
288 tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
289 rwsem_set_reader_owned(sem);
291 rwsem_downgrade_wake(sem);