linux.git: fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

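/*
 * Debug-only lock accounting: with CONFIG_BTRFS_DEBUG set, the helpers below
 * count the spinning readers/writers and read/write locks held on an extent
 * buffer and assert on inconsistent states; otherwise they compile to no-ops.
 */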
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
        atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
        atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->write_locks);
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
        atomic_dec(&eb->write_locks);
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

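/*
 * Convert a spinning read lock held by this task into a blocking read lock,
 * so the holder may sleep.  Bumps blocking_readers and drops the rwlock; a
 * nested read lock inside our own write lock is left untouched.
 */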
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
        trace_btrfs_set_lock_blocking_read(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
        btrfs_assert_spinning_readers_put(eb);
        read_unlock(&eb->lock);
}

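/*
 * Convert a spinning write lock held by this task into a blocking write
 * lock, so the holder may sleep.  Bumps blocking_writers and drops the
 * rwlock; a nested lock inside our own write lock is left untouched.
 */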
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
        trace_btrfs_set_lock_blocking_write(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (atomic_read(&eb->blocking_writers) == 0) {
                btrfs_assert_spinning_writers_put(eb);
                btrfs_assert_tree_locked(eb);
                atomic_inc(&eb->blocking_writers);
                write_unlock(&eb->lock);
        }
}

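/*
 * Convert a blocking read lock back into a spinning one: retake the rwlock
 * for reading and drop our blocking_readers count, waking waiters once the
 * last blocking reader is gone.
 */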
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
        trace_btrfs_clear_lock_blocking_read(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_readers) == 0);
        read_lock(&eb->lock);
        btrfs_assert_spinning_readers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
}

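/*
 * Convert a blocking write lock back into a spinning one: retake the rwlock
 * for writing and drop blocking_writers, waking any waiters.
 */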
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
        trace_btrfs_clear_lock_blocking_write(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_writers) != 1);
        write_lock(&eb->lock);
        btrfs_assert_spinning_writers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_writers))
                cond_wake_up_nomb(&eb->write_lock_wq);
}

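/*
 * Typical conversion sequence (an illustrative sketch, not code taken from
 * any caller): take the spinning lock, make it blocking before doing work
 * that may sleep, then make it spinning again before unlocking:
 *
 *        btrfs_tree_lock(eb);
 *        btrfs_set_lock_blocking_write(eb);
 *        ...work that may sleep...
 *        btrfs_clear_lock_blocking_write(eb);
 *        btrfs_tree_unlock(eb);
 */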
/*
 * Take a spinning read lock.  This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_read_lock_enabled())
                start_ns = ktime_get_ns();
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = true;
                read_unlock(&eb->lock);
                trace_btrfs_tree_read_lock(eb, start_ns);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
        trace_btrfs_tree_read_lock(eb, start_ns);
}

/*
 * Take a spinning read lock.  Returns 1 if we get the read lock and 0 if we
 * don't.  This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
        trace_btrfs_tree_read_lock_atomic(eb);
        return 1;
}

/*
 * Try to take a spinning read lock without waiting.  Returns 1 if we get the
 * read lock and 0 if we don't.  This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
        trace_btrfs_try_tree_read_lock(eb);
        return 1;
}

/*
 * Try to take a spinning write lock without waiting.  Returns 1 if we get
 * the write lock and 0 if we don't.  This won't wait for blocking writers or
 * readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        btrfs_assert_tree_write_locks_get(eb);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
        trace_btrfs_try_tree_write_lock(eb);
        return 1;
}

/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        trace_btrfs_tree_read_unlock(eb);
        /*
         * If we're nested, we have the write lock.  No new locking is needed
         * as long as we are the lock owner.  The write unlock will do a
         * barrier for us, and the lock_nested field only matters to the lock
         * owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = false;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        btrfs_assert_spinning_readers_put(eb);
        btrfs_assert_tree_read_locks_put(eb);
        read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        trace_btrfs_tree_read_unlock_blocking(eb);
        /*
         * If we're nested, we have the write lock.  No new locking is needed
         * as long as we are the lock owner.  The write unlock will do a
         * barrier for us, and the lock_nested field only matters to the lock
         * owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = false;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        btrfs_assert_tree_read_locks_put(eb);
}

/*
 * Take a spinning write lock.  This will wait for both blocking readers and
 * writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_lock_enabled())
                start_ns = ktime_get_ns();

        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers) ||
            atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                goto again;
        }
        btrfs_assert_spinning_writers_get(eb);
        btrfs_assert_tree_write_locks_get(eb);
        eb->lock_owner = current->pid;
        trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        trace_btrfs_tree_unlock(eb);
        eb->lock_owner = 0;
        btrfs_assert_tree_write_locks_put(eb);

        if (blockers) {
                btrfs_assert_no_spinning_writers(eb);
                atomic_dec(&eb->blocking_writers);
                /* Use the lighter barrier after atomic */
                smp_mb__after_atomic();
                cond_wake_up_nomb(&eb->write_lock_wq);
        } else {
                btrfs_assert_spinning_writers_put(eb);
                write_unlock(&eb->lock);
        }
}