// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

#ifdef CONFIG_BTRFS_DEBUG

static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif
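
/*
 * Illustrative sketch (not code from this file) of what the paired
 * assertion helpers above are meant to catch under CONFIG_BTRFS_DEBUG:
 * every _get must be balanced by a _put on the same code path, so a
 * missed unlock or a double conversion shows up as a WARN_ON:
 *
 *	btrfs_assert_spinning_writers_get(eb);
 *	... critical section ...
 *	btrfs_assert_spinning_writers_put(eb);	// WARNs if count != 1
 */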

void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}
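
/*
 * Sketch of the intended spin -> blocking conversion for a writer; the
 * caller shown here is hypothetical, the functions are the ones defined
 * in this file. A holder converts its spinning write lock to a blocking
 * one before doing work that may sleep, then drops it as usual:
 *
 *	btrfs_tree_lock(eb);			// spinning write lock
 *	btrfs_set_lock_blocking_write(eb);	// may now sleep safely
 *	... blocking work ...
 *	btrfs_tree_unlock(eb);			// handles the blocking case
 */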

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}
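
/*
 * Hypothetical caller sketch for the spinning read lock; the critical
 * section must not sleep while the lock is held in spinning mode:
 *
 *	btrfs_tree_read_lock(eb);
 *	... short, non-sleeping access to eb ...
 *	btrfs_tree_read_unlock(eb);
 */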

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}
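
/*
 * Illustrative (hypothetical) fallback pattern for the non-waiting
 * variant above: try it first, and only take the full, waiting read
 * lock when a blocking writer is in the way:
 *
 *	if (!btrfs_tree_read_lock_atomic(eb))
 *		btrfs_tree_read_lock(eb);
 */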

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}
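
/*
 * Hypothetical usage sketch: unlike btrfs_tree_read_lock_atomic() above,
 * this variant also refuses to spin on a contended rwlock, so a caller
 * can back off entirely instead of waiting:
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		... access eb ...
 *		btrfs_tree_read_unlock(eb);
 *	} else {
 *		... drop other locks, retry later ...
 *	}
 */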

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}
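
/*
 * Hypothetical usage sketch for the write trylock; on failure the caller
 * typically releases whatever it holds and takes the blocking path:
 *
 *	if (!btrfs_try_tree_write_lock(eb)) {
 *		... release conflicting locks ...
 *		btrfs_tree_lock(eb);
 *	}
 */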

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}
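
/*
 * Sketch of the full blocking read lock lifecycle (hypothetical caller,
 * real functions from this file):
 *
 *	btrfs_tree_read_lock(eb);		// spinning
 *	btrfs_set_lock_blocking_read(eb);	// convert before sleeping
 *	... blocking work ...
 *	btrfs_tree_read_unlock_blocking(eb);	// pairs with the conversion
 */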

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}
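
/*
 * Sketch of the write lock lifecycle this function closes out
 * (hypothetical caller, real functions from this file); the unlock
 * handles both the spinning case and the converted blocking case:
 *
 *	btrfs_tree_lock(eb);
 *	if (need_to_sleep)
 *		btrfs_set_lock_blocking_write(eb);
 *	...
 *	btrfs_tree_unlock(eb);
 */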