kernel/locking/rwsem.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
 *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
 *    i.e. the owner(s) cannot be readily determined. It can be reader
 *    owned or the owning writer is indeterminate.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field, with both the RWSEM_READER_OWNED and
 * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field is
 * largely left untouched. So for a free or reader-owned rwsem, the
 * owner value may contain information about the last reader that
 * acquired the rwsem. The anonymous bit is set because that particular
 * reader may or may not still own the lock.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
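/*
 * To illustrate the encoding above (a sketch using a hypothetical,
 * suitably aligned task_struct address; not from the original source):
 *
 *   writer holds the lock      : owner = 0xffff888012345600
 *   reader holds the lock      : owner = 0xffff888012345603
 *   that reader has unlocked   : owner = 0xffff888012345603 (stale)
 *   writer has unlocked        : owner = NULL
 */
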
#include "lock_events.h"

#define RWSEM_READER_OWNED      (1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED (1UL << 1)

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)   do {                    \
        if (!debug_locks_silent &&                              \
            WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
                #c, atomic_long_read(&(sem)->count),            \
                (long)((sem)->owner), (long)current,            \
                list_empty(&(sem)->wait_list) ? "" : "not "))   \
                        debug_locks_off();                      \
        } while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
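
/*
 * Usage sketch (not from the source): something like
 * DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem) warns once if the
 * condition trips, printing the count, owner, current task and
 * wait-list state, then disables further lock debugging via
 * debug_locks_off().
 */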

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK              0xffffffffL
#else
# define RWSEM_ACTIVE_MASK              0x0000ffffL
#endif

#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

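/*
 * Worked example of the resulting count values on 64-bit (a sketch,
 * not part of the original source; 32-bit is analogous with the
 * 0xffff masks):
 *
 *   free                       : count = 0x0000000000000000
 *   one reader active          : count = 0x0000000000000001
 *   two readers active         : count = 0x0000000000000002
 *   one writer active          : count = 0xffffffff00000001
 *   one reader + queued waiter : count = 0xffffffff00000001
 *
 * The last two states share the same bit pattern, so the count alone
 * cannot tell a write-locked rwsem from a read-locked one with
 * waiters, which helps explain why the owner field above is needed.
 */
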
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without the lock. Reads from owner,
 * however, may not need READ_ONCE() as long as the pointer value is
 * only used for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
        WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
        WRITE_ONCE(sem->owner, NULL);
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
                                            struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
                                                 | RWSEM_ANONYMOUSLY_OWNED;

        WRITE_ONCE(sem->owner, (struct task_struct *)val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
        __rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if a rwsem waiter can spin on the rwsem's owner
 * and steal the lock, i.e. the lock is not anonymously owned.
 * N.B. !owner is considered spinnable.
 */
static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
{
        return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}
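
/*
 * For example, a NULL owner and a plain writer task pointer are both
 * spinnable, while any owner value with RWSEM_ANONYMOUSLY_OWNED set
 * (reader-owned or otherwise indeterminate) is not.
 */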

/*
 * Return true if the rwsem is owned by an anonymous writer or readers.
 */
static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
        return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, this makes sure that a task
 * pointer left in the owner field of a reader-owned rwsem is the real
 * owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
#define rwsem_clear_reader_owned rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
        unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
                                                   | RWSEM_ANONYMOUSLY_OWNED;
        /*
         * Use cmpxchg so that a stale value naming this task is only
         * cleared if no other reader has updated the field meanwhile.
         */
        if (READ_ONCE(sem->owner) == (struct task_struct *)val)
                cmpxchg_relaxed((unsigned long *)&sem->owner, val,
                                RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
}
#endif

#else
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
                                            struct task_struct *owner)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif

#ifndef rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
                rwsem_down_read_failed(sem);
                DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
                                        RWSEM_READER_OWNED), sem);
        } else {
                rwsem_set_reader_owned(sem);
        }
}
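
/*
 * To illustrate the read fast path above (a sketch, not from the
 * source): on an uncontended rwsem the increment takes count from 0
 * to 1 and the reader proceeds immediately; if a writer holds the
 * lock or waiters are queued, the result stays <= 0 and the reader
 * falls into the slow path in rwsem_down_read_failed().
 */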

static inline int __down_read_killable(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
                if (IS_ERR(rwsem_down_read_failed_killable(sem)))
                        return -EINTR;
                DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
                                        RWSEM_READER_OWNED), sem);
        } else {
                rwsem_set_reader_owned(sem);
        }
        return 0;
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        /*
         * Optimize for the case when the rwsem is not locked at all.
         */
        long tmp = RWSEM_UNLOCKED_VALUE;

        lockevent_inc(rwsem_rtrylock);
        do {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                        tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        rwsem_set_reader_owned(sem);
                        return 1;
                }
        } while (tmp >= 0);
        return 0;
}
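
/*
 * Design note (not from the source): on a failed cmpxchg, tmp is
 * reloaded with the current count, so the loop above keeps retrying
 * while only readers hold the lock (count >= 0) and gives up as soon
 * as a writer or queued waiter makes the count negative.
 */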

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                             &sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
        rwsem_set_owner(sem);
}
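
/*
 * For the write fast path above (a sketch): adding
 * RWSEM_ACTIVE_WRITE_BIAS to a free rwsem (count 0) yields exactly
 * RWSEM_ACTIVE_WRITE_BIAS; any other result means the lock was held
 * or contended, so the writer must take the slow path.
 */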

static inline int __down_write_killable(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                             &sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                if (IS_ERR(rwsem_down_write_failed_killable(sem)))
                        return -EINTR;
        rwsem_set_owner(sem);
        return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        lockevent_inc(rwsem_wtrylock);
        tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        if (tmp == RWSEM_UNLOCKED_VALUE) {
                rwsem_set_owner(sem);
                return true;
        }
        return false;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
                                sem);
        rwsem_clear_reader_owned(sem);
        tmp = atomic_long_dec_return_release(&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}
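
/*
 * Decoding the wake test above (a note, not from the source): a
 * result below -1 with a clear active mask means the waiting bias is
 * set but no active readers or writer remain, i.e. this was the last
 * active reader and there are queued waiters to wake.
 */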

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
        rwsem_clear_owner(sem);
        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                                    &sem->count) < 0))
                rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        /*
         * When downgrading from exclusive to shared ownership,
         * anything inside the write-locked region cannot leak
         * into the read side. In contrast, anything in the
         * read-locked region is ok to be re-ordered into the
         * write side. As such, rely on RELEASE semantics.
         */
        DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
        tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
        rwsem_set_reader_owned(sem);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}
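
/*
 * Caller-side usage sketch (an assumption about typical use, not from
 * this file): a writer that has finished updating shared state but
 * still needs read access can call downgrade_write(sem) instead of
 * up_write() followed by down_read(), closing the window in which
 * another writer could slip in between the two calls.
 */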