btrfs: use assertion helpers for spinning writers
fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

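/*
 * Extent buffers can be locked in two modes: spinning (the rwlock eb->lock
 * is held and waiters spin on it) and blocking (the holder is accounted in
 * blocking_readers/blocking_writers and the rwlock is dropped so waiters
 * can sleep on read_lock_wq/write_lock_wq instead of spinning).  The
 * btrfs_set_lock_blocking_*() and btrfs_clear_lock_blocking_*() helpers
 * below convert a held lock between the two modes.
 */
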
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

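/*
 * Debug-only sanity checks on the spinning_writers count: exactly one task
 * may hold the write lock spinning at a time.  With CONFIG_BTRFS_DEBUG
 * disabled these compile to empty stubs.
 */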
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
        atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
#endif

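/*
 * Convert a held spinning read lock into a blocking one: account it in
 * blocking_readers and drop the rwlock so that waiters are not forced to
 * spin while the holder may sleep.
 */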
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        read_unlock(&eb->lock);
}

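/*
 * Convert a held spinning write lock into a blocking one: account it in
 * blocking_writers and release the rwlock.  A lock that is already blocking
 * is left untouched.
 */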
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (atomic_read(&eb->blocking_writers) == 0) {
                btrfs_assert_spinning_writers_put(eb);
                btrfs_assert_tree_locked(eb);
                atomic_inc(&eb->blocking_writers);
                write_unlock(&eb->lock);
        }
}

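/*
 * Convert a blocking read lock back to a spinning one: reacquire the rwlock
 * shared, move the accounting from blocking_readers to spinning_readers and
 * wake anyone waiting for the blocking readers to drain.
 */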
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_readers) == 0);
        read_lock(&eb->lock);
        atomic_inc(&eb->spinning_readers);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
}

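/*
 * Convert a blocking write lock back to a spinning one: reacquire the rwlock
 * exclusively, drop the blocking_writers count and wake anyone waiting for it
 * to reach zero.
 */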
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_writers) != 1);
        write_lock(&eb->lock);
        btrfs_assert_spinning_writers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_writers))
                cond_wake_up_nomb(&eb->write_lock_wq);
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
        return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers) ||
            atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                goto again;
        }
        btrfs_assert_spinning_writers_get(eb);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                btrfs_assert_no_spinning_writers(eb);
                atomic_dec(&eb->blocking_writers);
                /* Use the lighter barrier after atomic */
                smp_mb__after_atomic();
                cond_wake_up_nomb(&eb->write_lock_wq);
        } else {
                btrfs_assert_spinning_writers_put(eb);
                write_unlock(&eb->lock);
        }
}

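/*
 * Trip a BUG if the extent buffer is not write locked.
 */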
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

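/*
 * Trip a BUG if the extent buffer is not read locked.
 */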
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}