// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->write_locks);
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->write_locks);
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

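/*
 * An extent buffer lock is held in one of two modes:
 *
 *	spinning - eb->lock (an rwlock_t) is actually held; contenders spin
 *	blocking - eb->lock has been dropped and the holder is tracked in
 *		   blocking_readers/blocking_writers; contenders sleep on
 *		   read_lock_wq/write_lock_wq
 *
 * Holders switch to blocking mode before doing anything that may sleep and
 * switch back afterwards, via the set/clear helpers below.
 */
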
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

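/*
 * Sketch of a read-side round trip (illustrative only):
 *
 *	btrfs_tree_read_lock(eb);	     spinning read lock
 *	btrfs_set_lock_blocking_read(eb);    drops eb->lock, bumps blocking_readers
 *	 ...work that may sleep...
 *	btrfs_clear_lock_blocking_read(eb);  re-takes eb->lock
 *	btrfs_tree_read_unlock(eb);
 */
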
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

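/*
 * Callers usually convert a whole btrfs_path at once before sleeping, along
 * the lines of btrfs_set_path_blocking() (simplified sketch, not the exact
 * implementation):
 *
 *	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
 *		if (!path->nodes[i] || !path->locks[i])
 *			continue;
 *		if (path->locks[i] == BTRFS_READ_LOCK)
 *			btrfs_set_lock_blocking_read(path->nodes[i]);
 *		else if (path->locks[i] == BTRFS_WRITE_LOCK)
 *			btrfs_set_lock_blocking_write(path->nodes[i]);
 *	}
 */
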
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_clear_lock_blocking_read(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	btrfs_assert_spinning_readers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_clear_lock_blocking_write(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

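/*
 * Sketch of the matching write-side round trip (illustrative only):
 *
 *	btrfs_tree_lock(eb);			spinning write lock
 *	btrfs_set_lock_blocking_write(eb);	drops eb->lock, blocking_writers = 1
 *	 ...work that may sleep, e.g. COW or IO...
 *	btrfs_clear_lock_blocking_write(eb);	re-takes eb->lock
 *	btrfs_tree_unlock(eb);
 */
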
/*
 * Take a spinning read lock.  This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread.  We allow
		 * an additional read lock to be added because it's for the
		 * same thread.  btrfs_find_all_roots() depends on this as it
		 * may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

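/*
 * The nested case above permits recursion like the following (illustrative
 * only), which btrfs_find_all_roots() relies on:
 *
 *	btrfs_tree_lock(eb);			write lock
 *	btrfs_set_lock_blocking_write(eb);
 *	 ...backref walking re-enters the same tree...
 *	btrfs_tree_read_lock(eb);		same thread: no deadlock,
 *						just sets eb->lock_nested
 *	btrfs_tree_read_unlock(eb);		clears eb->lock_nested
 */
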
/*
 * Take a spinning read lock.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

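/*
 * Typical caller pattern (sketch): try the cheap acquisition first and fall
 * back to the sleeping variant on failure:
 *
 *	if (!btrfs_tree_read_lock_atomic(eb))
 *		btrfs_tree_read_lock(eb);
 */
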
/*
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

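/*
 * Unlike btrfs_tree_read_lock_atomic(), this uses read_trylock() and so
 * never spins on eb->lock either.  Sketch of a caller that must not wait
 * at all (the -EAGAIN bail-out is hypothetical):
 *
 *	if (!btrfs_try_tree_read_lock(eb))
 *		return -EAGAIN;
 */
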
/*
 * Returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

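/*
 * Sketch of an opportunistic caller (illustrative only): take the write
 * lock if it is uncontended, otherwise go through the full blocking path:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);	   may sleep on blocking holders
 */
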
/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * If we're nested, we have the write lock.  No new locking is needed
	 * as long as we are the lock owner.  The write unlock will do a
	 * barrier for us, and the lock_nested field only matters to the lock
	 * owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * If we're nested, we have the write lock.  No new locking is needed
	 * as long as we are the lock owner.  The write unlock will do a
	 * barrier for us, and the lock_nested field only matters to the lock
	 * owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

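/*
 * Pairing rules (for reference): a lock taken with btrfs_tree_read_lock()
 * is dropped with btrfs_tree_read_unlock() while still spinning, but once
 * converted by btrfs_set_lock_blocking_read() it must be dropped with
 * btrfs_tree_read_unlock_blocking() instead:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	 ...sleeping work...
 *	btrfs_tree_read_unlock_blocking(eb);
 */
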
/*
 * Take a spinning write lock.  This will wait for both blocking readers and
 * writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

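/*
 * Note the WARN_ON above: eb->lock is not recursive for writers, so a
 * thread that already owns the write lock would deadlock against itself
 * here (illustrative bug):
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_tree_lock(eb);	   warns, then deadlocks on eb->lock
 */
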
/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

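/*
 * The blockers case above pairs with btrfs_set_lock_blocking_write(): the
 * rwlock was already released when the lock went blocking, so unlocking
 * only decrements blocking_writers and wakes write_lock_wq (sketch):
 *
 *	btrfs_tree_lock(eb);			write_lock(&eb->lock)
 *	btrfs_set_lock_blocking_write(eb);	write_unlock, blocking_writers = 1
 *	btrfs_tree_unlock(eb);			atomic_dec + wake up waiters
 */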