// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture
 * by Waiman Long <longman@redhat.com>.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include "rwsem.h"
#include "lock_events.h"
/*
 * The least significant 2 bits of the owner value have the following
 * meanings:
 *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
 *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
 *    i.e. the owner(s) cannot be readily determined. It can be reader
 *    owned or the owning writer is indeterminate.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with both the RWSEM_READER_OWNED and
 * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will
 * largely be left untouched. So for a free or reader-owned rwsem,
 * the owner value may contain information about the last reader that
 * acquired the rwsem. The anonymous bit is set because that particular
 * reader may or may not still own the lock.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)
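/*
 * For example, after rwsem_set_reader_owned() the owner field holds
 *
 *	(unsigned long)current | RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED
 *
 * while a writer stores its bare task_struct pointer with both of the
 * above bits cleared.
 */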
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(long)((sem)->owner), (long)current,		\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
/*
 * The definition of the atomic counter in the semaphore:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bits 2-7  - reserved
 * Bits 8-X  - 24-bit (32-bit) or 56-bit reader count
 *
 * atomic_long_fetch_add() is used to obtain reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain writer lock.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS)
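/*
 * For example, a count of ((2UL << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS)
 * means two readers currently hold the lock and at least one waiter is
 * queued: the reader count is (count & RWSEM_READER_MASK) >>
 * RWSEM_READER_SHIFT, and (count & RWSEM_WRITER_MASK) is zero.
 */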
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}
/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
						 | RWSEM_ANONYMOUSLY_OWNED;

	WRITE_ONCE(sem->owner, (struct task_struct *)val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}
/*
 * Return true if a rwsem waiter can spin on the rwsem's owner
 * and steal the lock, i.e. the lock is not anonymously owned.
 * N.B. !owner is considered spinnable.
 */
static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
{
	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}
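/*
 * A NULL owner and a plain writer task pointer are thus both spinnable;
 * a reader-owned rwsem always has RWSEM_ANONYMOUSLY_OWNED set and is not.
 */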
/*
 * Return true if rwsem is owned by an anonymous writer or readers.
 */
static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}
#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it will
 * be the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
						   | RWSEM_ANONYMOUSLY_OWNED;
	if (READ_ONCE(sem->owner) == (struct task_struct *)val)
		cmpxchg_relaxed((unsigned long *)&sem->owner, val,
				RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
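/*
 * Typical usage, as a minimal illustrative sketch ("my_sem" is a made-up
 * name, not part of this file):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);
 *	... read-side critical section, may sleep ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);
 *	... write-side critical section, may sleep ...
 *	up_write(&my_sem);
 */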
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};
/*
 * Handle the lock release when processes blocked on it can now run:
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}
	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			atomic_long_sub(adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 */
		__rwsem_set_reader_owned(sem, waiter->task);
	}
	/*
	 * Grant an infinite number of read locks to the readers at the front
	 * of the queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	list_for_each_entry(waiter, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
	}
	list_cut_before(&wlist, &sem->wait_list, &waiter->list);

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
	/* 2nd pass: clear waiter->task and queue each reader for wakeup */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}
/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	long new;

	if (count & RWSEM_LOCK_MASK)
		return false;

	new = count + RWSEM_WRITER_LOCKED -
	      (list_is_singular(&sem->wait_list) ? RWSEM_FLAG_WAITERS : 0);

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
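/*
 * Note that if this writer is the only waiter left on the queue, the same
 * cmpxchg above also clears RWSEM_FLAG_WAITERS, e.g. count goes from
 * RWSEM_FLAG_WAITERS straight to RWSEM_WRITER_LOCKED.
 */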
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & RWSEM_LOCK_MASK)) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count + RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}
static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, we skip spinning if the task is
	 * not running on a CPU or if its CPU has been preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!is_rwsem_owner_spinnable(owner))
		return false;

	rcu_read_lock();
	while (owner && (READ_ONCE(sem->owner) == owner)) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory; if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched() is set, the owner is
		 * not running, or the owner's cpu has been preempted.
		 */
		if (need_resched() || !owner_on_cpu(owner)) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}
#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}
#endif
/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_READER_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_READER_BIAS has already been
		 * set in the count.
		 */
		if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (!(count & RWSEM_LOCK_MASK) ||
	   (!(count & RWSEM_WRITER_MASK) && (adjustment & RWSEM_FLAG_WAITERS)))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}
__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);
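/*
 * The read-side slowpaths above and the write-side slowpaths below are
 * reached from the __down_read*()/__down_write*() fast paths later in this
 * file when the fast-path atomic update on ->count indicates contention.
 */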
/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers and some readers, the lock must be read
		 * owned; so we try to wake any read locks that were queued
		 * ahead of us.
		 */
		if (!(count & RWSEM_WRITER_MASK) &&
		     (count & RWSEM_READER_MASK)) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else {
		count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);
	}

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
			count = atomic_long_read(&sem->count);
		} while (count & RWSEM_LOCK_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}
__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);
/*
 * downgrade a write lock into a read lock
 * - caller has converted its write lock into a read lock and found that
 *   there are still waiters queued on the semaphore
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
/*
 * lock for reading
 */
inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK)) {
		rwsem_down_read_failed(sem);
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK)) {
		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	long tmp = RWSEM_UNLOCKED_VALUE;

	lockevent_inc(rwsem_rtrylock);
	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
						 RWSEM_WRITER_LOCKED)))
		rwsem_down_write_failed(sem);
	rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
						 RWSEM_WRITER_LOCKED)))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	rwsem_set_owner(sem);
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	lockevent_inc(rwsem_wtrylock);
	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
					  RWSEM_WRITER_LOCKED);
	if (tmp == RWSEM_UNLOCKED_VALUE) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}
/*
 * unlock after reading
 */
inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
				sem);
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS))
			== RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	rwsem_clear_owner(sem);
	if (unlikely(atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED,
			&sem->count) & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}
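/*
 * In both unlock paths above, rwsem_wake() is only called when
 * RWSEM_FLAG_WAITERS is set in the count returned by the atomic release
 * operation; __up_read() additionally requires that no lock holders
 * remain, since other readers may still be active.
 */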
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}
/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);
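/*
 * Illustrative trylock usage (sketch only, "my_sem" is a made-up name):
 *
 *	if (down_read_trylock(&my_sem)) {
 *		... read-side critical section ...
 *		up_read(&my_sem);
 *	} else {
 *		... lock was contended, fall back or retry ...
 *	}
 */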
/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing, interruptible by fatal signals
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);
/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);

	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
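/*
 * Illustrative downgrade usage (sketch only, "my_sem" is a made-up name):
 *
 *	down_write(&my_sem);
 *	... update the protected data exclusively ...
 *	downgrade_write(&my_sem);
 *	... keep reading the data; other readers may now run too ...
 *	up_read(&my_sem);
 */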
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);

	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();

	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);
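/*
 * down_read_non_owner() takes the lock without a lockdep acquisition and
 * without recording an owning task (the owner field is set to NULL with
 * only the reader bits above), so it is meant to be paired with
 * up_read_non_owner() below, which likewise skips the lockdep release.
 */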
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
				sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif