// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
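
/*
 * Extent buffer locking here is two-level: a "spinning" lock is the
 * plain rwlock eb->lock, held only across short non-sleeping sections.
 * Before a holder may sleep, it converts its lock to "blocking" by
 * bumping blocking_readers/blocking_writers and releasing the rwlock;
 * contending tasks then sleep on read_lock_wq/write_lock_wq instead of
 * spinning.  The read_locks/write_locks and spinning_* counters back
 * the assertions below.
 */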

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}
#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
#endif
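
/*
 * With CONFIG_BTRFS_DEBUG unset the helpers above compile to empty
 * functions, so the spinning-writer assertions cost nothing in
 * production builds.
 */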

void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	atomic_inc(&eb->spinning_readers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}
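
/*
 * A typical read-side sequence, sketched only for illustration (real
 * callers vary):
 *
 *	btrfs_tree_read_lock(eb);		take spinning read lock
 *	btrfs_set_lock_blocking_read(eb);	convert before sleeping
 *	... work that may sleep ...
 *	btrfs_clear_lock_blocking_read(eb);	back to spinning
 *	btrfs_tree_read_unlock(eb);
 */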

void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}
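
/*
 * The write-side counterpart, again only a sketch of the common
 * pattern:
 *
 *	btrfs_tree_lock(eb);			take spinning write lock
 *	btrfs_set_lock_blocking_write(eb);	convert before sleeping
 *	... work that may sleep ...
 *	btrfs_clear_lock_blocking_write(eb);	back to spinning
 *	btrfs_tree_unlock(eb);
 */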

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the
		 * same thread. btrfs_find_all_roots() depends on this as it
		 * may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
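
/*
 * Note the difference between the three read-lock variants above:
 * btrfs_tree_read_lock() sleeps until blocking writers are gone,
 * btrfs_tree_read_lock_atomic() may spin on eb->lock but never sleeps,
 * and btrfs_try_tree_read_lock() fails instead of spinning because it
 * uses read_trylock().
 */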

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}
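
/*
 * Callers that cannot afford to wait typically try the trylock
 * variants first and, on failure, fall back to the sleeping
 * btrfs_tree_lock()/btrfs_tree_read_lock().
 */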

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}
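
/*
 * Note the asymmetry above: once a write lock has been converted to
 * blocking, the rwlock itself is no longer held, so the unlock only
 * has to drop blocking_writers and wake any waiters; in the spinning
 * case the rwlock is released directly.
 */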

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}