// SPDX-License-Identifier: GPL-2.0
/* kernel/locking/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include "rwsem.h"
#include "lock_events.h"
/*
 * The least significant 3 bits of the owner value have the following
 * meanings when set.
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
 *  - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
 *
 * When the rwsem is either owned by an anonymous writer, or it is
 * reader-owned but a spinning writer has timed out, both nonspinnable
 * bits will be set to disable optimistic spinning by readers and writers.
 * In the latter case, the last unlocking reader should then check the
 * writer nonspinnable bit and clear it to give writers, but not readers,
 * preference in acquiring the lock via optimistic spinning. A similar
 * action is also done in the reader slowpath.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it also puts its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field is largely left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_RD_NONSPINNABLE	(1UL << 1)
#define RWSEM_WR_NONSPINNABLE	(1UL << 2)
#define RWSEM_NONSPINNABLE	(RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
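
/*
 * Example (illustrative only, not part of the implementation): a
 * reader-owned rwsem on which a spinning writer has timed out carries
 * the last reader's task_struct pointer plus the flag bits above:
 *
 *	owner == (unsigned long)task | RWSEM_READER_OWNED
 *				     | RWSEM_NONSPINNABLE
 *
 * This packing works because task_struct pointers are at least 8-byte
 * aligned, leaving the low 3 bits free for the flags.
 */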
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
/*
 * The definition of the atomic counter in the semaphore:
 *
 * Bit  0   - writer locked bit
 * Bit  1   - waiters present bit
 * Bit  2   - lock handoff bit
 * Bits 3-7 - reserved
 * Bits 8-X - 24-bit (32-bit) or 56-bit reader count
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers.
 * 2) rwsem_try_write_lock() for writers.
 * 3) Error path of rwsem_down_write_slowpath().
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of the handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF)
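
/*
 * Example (illustrative only): with the definitions above, a count of
 *
 *	(2UL << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS == 0x202
 *
 * describes a rwsem read-locked by two active readers with at least one
 * task blocked in the wait queue, while
 *
 *	RWSEM_WRITER_LOCKED | RWSEM_FLAG_WAITERS | RWSEM_FLAG_HANDOFF == 0x7
 *
 * describes a write-locked rwsem whose first waiter has requested a
 * handoff.
 */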
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. A read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, 0);
}
/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}
/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED;

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}
/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}
#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it is
 * the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}
/*
 * Return just the real task structure pointer of the owner.
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}
/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED bit isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * while a writer has just released the lock. In that case, another writer
 * may steal the lock immediately afterwards.
 */
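
/*
 * Put together (illustrative only), the "definitely owned by readers"
 * test sketched above is:
 *
 *	!(count & RWSEM_WRITER_LOCKED) &&
 *	 (count & RWSEM_READER_MASK)   &&
 *	 (owner & RWSEM_READER_OWNED)
 */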
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

enum writer_wait_state {
	WRITER_NOT_FIRST,	/* Writer is not first in wait list */
	WRITER_FIRST,		/* Writer is first in wait list */
	WRITER_HANDOFF		/* Writer is first & handoff needed */
};
/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
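
/*
 * Worked example (illustrative only): DIV_ROUND_UP(HZ, 250) yields
 * 4 jiffies = 4ms at HZ=1000, 1 jiffy = 4ms at HZ=250 and 1 jiffy = 10ms
 * at HZ=100, i.e. the timeout is never shorter than one jiffy.
 */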
/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);
	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}
	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
			    time_after(jiffies, waiter->timeout)) {
				adjustment -= RWSEM_FLAG_HANDOFF;
				lockevent_inc(rwsem_rlock_handoff);
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 */
		__rwsem_set_reader_owned(sem, waiter->task);
	}
	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that the number woken will be at least 1 as we
	 * accounted for above. Note we increment the 'active part' of the
	 * count by the number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where at the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. This
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (woken >= MAX_READERS_WAKEUP)
			break;
	}
	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	/*
	 * When we've woken a reader, we no longer need to force writers
	 * to give up the lock and we can clear HANDOFF.
	 */
	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
		adjustment -= RWSEM_FLAG_HANDOFF;

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}
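
/*
 * Typical caller pattern (illustrative only), as used by rwsem_wake()
 * below: collect the wakeups under wait_lock, then issue them after
 * dropping the lock.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 *	if (!list_empty(&sem->wait_list))
 *		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 *	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 *	wake_up_q(&wake_q);
 */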
/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
 * bit is set or the lock is acquired with handoff bit cleared.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					enum writer_wait_state wstate)
{
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff && wstate == WRITER_NOT_FIRST)
			return false;

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (wstate != WRITER_HANDOFF))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF)
		return false;

	rwsem_set_owner(sem);
	return true;
}
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire read lock before the reader is put on wait queue.
 * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
 * is ongoing.
 */
static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
		return false;

	count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
	if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_opt_rlock);
		return true;
	}

	/* Back out the change */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	return false;
}
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}
static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * To guard against lock holder preemption, skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	BUILD_BUG_ON(!(RWSEM_OWNER_UNKNOWN & RWSEM_NONSPINNABLE));

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	rcu_read_lock();
	owner = rwsem_owner_flags(sem, &flags);
	if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
		ret = false;
	rcu_read_unlock();
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}
/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)
static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
{
	if (flags & nonspinnable)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}
static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags, nonspinnable);
	if (state != OWNER_WRITER)
		return state;

	rcu_read_lock();
	for (;;) {
		if (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags, nonspinnable);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory; if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return state;
}
/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
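
/*
 * Worked example (illustrative only): with 10 readers the code above
 * yields delta = (20 + 10) * NSEC_PER_USEC / 2 = 15000ns, matching the
 * (10 + 10/2)us formula; at the 30-reader cap it saturates at
 * (20 + 30) * NSEC_PER_USEC / 2 = 25000ns = 25us.
 */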
static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;
	unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
					   : RWSEM_RD_NONSPINNABLE;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;
	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		owner_state = rwsem_spin_on_owner(sem, nonspinnable);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = wlock ? rwsem_try_write_lock_unqueued(sem)
			      : rwsem_try_read_lock_unqueued(sem);

		if (taken)
			break;
		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (wlock && (owner_state == OWNER_READER)) {
			/*
			 * Re-initialize rspin_threshold every time when
			 * the owner state changes from non-reader to reader.
			 * This allows a writer to steal the lock in between
			 * 2 reader phases and have the threshold reset at
			 * the beginning of the 2nd reader phase.
			 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, nonspinnable))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check time threshold once every 16 iterations to
			 * avoid calling sched_clock() too frequently so
			 * as to reduce the average latency between the times
			 * when the lock becomes free and when the spinner
			 * is ready to do a trylock.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}
		/*
		 * An RT task cannot do optimistic spinning if it cannot
		 * be sure the lock holder is running or live-lock may
		 * happen if the current task and the lock holder happen
		 * to run on the same CPU. However, aborting optimistic
		 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * a problem.
		 *
		 * There are 2 possible cases where an RT task may be able
		 * to continue spinning.
		 *
		 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the
		 * RT task is made to retry one more time to see if it can
		 * acquire the lock or continue spinning on the new owning
		 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or spinnable, the RT task will
		 * go back to sleep.
		 *
		 * If the owner is a writer, the need_resched() check is
		 * done inside rwsem_spin_on_owner(). If the owner is not
		 * a writer, the need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;
		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}
/*
 * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 *
 * This gives writers a better chance to acquire the rwsem first before
 * readers when the rwsem was being held by readers for a relatively long
 * period of time. A race can happen where an optimistic spinner has
 * just stolen the rwsem and set the owner, but clearing the
 * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
 */
static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
{
	if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
		atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
}
#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
{
	return false;
}

static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
#endif
/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_READER_BIAS;
	bool wake = false;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
		goto queue;

	/*
	 * Undo read bias from down_read() and do optimistic spinning.
	 */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	adjustment = 0;
	if (rwsem_optimistic_spin(sem, false)) {
		/*
		 * Wake up other readers in the wait list if the front
		 * waiter is a reader.
		 */
		if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer or has the handoff bit set, this reader can
		 * exit the slowpath and return immediately as its
		 * RWSEM_READER_BIAS has already been set in the count.
		 */
		if (adjustment && !(atomic_long_read(&sem->count) &
		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);
	/* we're now waiting on the lock, but no longer actively locking */
	if (adjustment)
		count = atomic_long_add_return(adjustment, &sem->count);
	else
		count = atomic_long_read(&sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (!(count & RWSEM_LOCK_MASK)) {
		clear_wr_nonspinnable(sem);
		wake = true;
	}
	if (wake || (!(count & RWSEM_WRITER_MASK) &&
		    (adjustment & RWSEM_FLAG_WAITERS)))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	/* wait to be given the lock */
	for (;;) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list)) {
		atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
				   &sem->count);
	}
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}
/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	long count;
	enum writer_wait_state wstate;
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
	    rwsem_optimistic_spin(sem, true))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;

	list_add_tail(&waiter.list, &sem->wait_list);
	/* we're now waiting on the lock */
	if (wstate == WRITER_NOT_FIRST) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and:
		 *  1) there are no active locks, wake the front
		 *     queued process(es) as the handoff bit might be set.
		 *  2) there are no active writers and some readers, the lock
		 *     must be read owned; so we try to wake any read lock
		 *     waiters that were queued ahead of us.
		 */
		if (count & RWSEM_WRITER_MASK)
			goto wait;

		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
					? RWSEM_WAKE_READERS
					: RWSEM_WAKE_ANY, &wake_q);

		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			wake_q_init(&wake_q);	/* Used again, reinit */
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

wait:
	/* wait until we successfully acquire the lock */
	set_current_state(state);
	for (;;) {
		if (rwsem_try_write_lock(sem, wstate))
			break;

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
			/*
			 * If HANDOFF bit is set, unconditionally do
			 * a trylock.
			 */
			if (wstate == WRITER_HANDOFF)
				break;

			if ((wstate == WRITER_NOT_FIRST) &&
			    (rwsem_first_waiter(sem) == &waiter))
				wstate = WRITER_FIRST;

			count = atomic_long_read(&sem->count);
			if (!(count & RWSEM_LOCK_MASK))
				break;

			/*
			 * The setting of the handoff bit is deferred
			 * until rwsem_try_write_lock() is called.
			 */
			if ((wstate == WRITER_FIRST) && (rt_task(current) ||
			    time_after(jiffies, waiter.timeout))) {
				wstate = WRITER_HANDOFF;
				lockevent_inc(rwsem_wlock_handoff);
				break;
			}
		} while (true);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;
out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);

	if (unlikely(wstate == WRITER_HANDOFF))
		atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);

	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
/*
 * lock for reading
 */
inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK)) {
		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	long tmp = RWSEM_UNLOCKED_VALUE;

	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED)))
		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
	else
		rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
	} else {
		rwsem_set_owner(sem);
	}
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					    RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}
/*
 * unlock after reading
 */
inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_wr_nonspinnable(sem);
		rwsem_wake(sem, tmp);
	}
}
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem, tmp);
}
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}
/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);
int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);
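
/*
 * Usage sketch (illustrative only, not part of this file): a trylock
 * caller must only release the lock when the trylock succeeded, e.g.:
 *
 *	if (down_read_trylock(&mm->mmap_sem)) {
 *		... read-side critical section ...
 *		up_read(&mm->mmap_sem);
 *	}
 */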
/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);
/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);
/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);
/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);
/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
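
/*
 * Usage sketch (illustrative only): a writer that has finished modifying
 * the protected data but still needs a stable view of it can drop to
 * shared mode without ever fully releasing the lock:
 *
 *	down_write(&sem);	// exclusive access: modify the data
 *	...
 *	downgrade_write(&sem);	// atomically become one of the readers
 *	...			// shared access: other readers may enter
 *	up_read(&sem);		// release the read lock
 */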
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);
void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);
int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);
void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif