/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"
/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X   (1) X readers active or attempting lock, no writer waiting
 *                  X = #active_readers + #readers_attempting_to_lock
 *                  (X*ACTIVE_BIAS)
 *
 * 0x00000000   rwsem is unlocked, and no one is waiting for the lock or
 *              attempting to read lock or write lock.
 *
 * 0xffff000X   (1) X readers active or attempting lock, with waiters for lock
 *                  X = #active readers + #readers attempting lock
 *                  (X*ACTIVE_BIAS + WAITING_BIAS)
 *              (2) 1 writer attempting lock, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *              (3) 1 writer active, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001   (1) 1 reader active or attempting lock, waiters for lock
 *                  (WAITING_BIAS + ACTIVE_BIAS)
 *              (2) 1 writer active or attempting lock, no waiters for lock
 *                  (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000   (1) There are writers or readers queued but none active
 *                  or in the process of attempting lock.
 *                  (WAITING_BIAS)
 *              Note: a writer can attempt to steal the lock for this count
 *              by adding ACTIVE_WRITE_BIAS in a cmpxchg and checking the
 *              old count.
 *
 * 0xfffe0001   (1) 1 writer active, or attempting lock. Waiters on queue.
 *                  (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *       checking that the count becomes more than 0 for successful lock
 *       acquisition, i.e. the case where there are only readers or nobody
 *       has the lock (1st and 2nd case above).
 *
 *       Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write
 *       and checking that the count becomes ACTIVE_WRITE_BIAS for successful
 *       lock acquisition (i.e. nobody else has the lock or attempts to lock).
 *       If unsuccessful, in rwsem_down_write_failed, we'll check to see if
 *       there are only waiters but none active (5th case above), and attempt
 *       to steal the lock.
 */
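/*
 * For illustration, a worked example using the 32-bit encoding implied by
 * the table above (ACTIVE_BIAS == 0x00000001, WAITING_BIAS == 0xffff0000,
 * ACTIVE_WRITE_BIAS == WAITING_BIAS + ACTIVE_BIAS == 0xffff0001); the exact
 * constants live in the rwsem headers, these values are just the 32-bit case:
 *
 *      3 readers hold the lock, nobody queued:
 *              3 * ACTIVE_BIAS                         = 0x00000003
 *      2 readers hold the lock, 1 writer queued:
 *              2 * ACTIVE_BIAS + WAITING_BIAS          = 0xffff0002
 *      1 writer holds the lock, 1 task queued:
 *              ACTIVE_WRITE_BIAS + WAITING_BIAS        = 0xfffe0001
 *      tasks queued only, nothing active:
 *              WAITING_BIAS                            = 0xffff0000
 */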
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
        osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
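/*
 * For context, a minimal sketch of how callers typically initialize and use
 * an rwsem through the generic API; only the contended slowpaths of these
 * operations end up in this file (names such as "my_sem" and "my_data" are
 * illustrative only):
 *
 *      static DECLARE_RWSEM(my_sem);           // static initialization
 *      // or at runtime:  init_rwsem(&my_sem);
 *
 *      down_read(&my_sem);                     // shared (reader) lock
 *      ... read my_data ...
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);                    // exclusive (writer) lock
 *      ... modify my_data ...
 *      up_write(&my_sem);
 */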
enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_mark_wake(struct rw_semaphore *sem,
                  enum rwsem_wake_type wake_type, struct wake_q_head *wake_q)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        long oldcount, woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY) {
                        /*
                         * Mark writer at the front of the queue for wakeup.
                         * Until the task is actually awoken later by the
                         * caller, other writers are able to steal it.
                         * Readers, on the other hand, will block as they
                         * will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
                }
                goto out;
        }

        /* Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment;

                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /* A writer stole the lock. Undo our reader grant. */
                        if (atomic_long_sub_return(adjustment, &sem->count) &
                                                RWSEM_ACTIVE_MASK)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
        }

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue. Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                        struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        if (adjustment)
                atomic_long_add(adjustment, &sem->count);

        next = sem->wait_list.next;
        loop = woken;
        do {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;

                wake_q_add(wake_q, tsk);
                /*
                 * Ensure that the last operation is setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wakeup.
                 */
                smp_store_release(&waiter->task, NULL);
        } while (--loop);

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}
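/*
 * Sketch of the expected caller-side pattern for __rwsem_mark_wake(); see
 * rwsem_wake() and rwsem_downgrade_wake() below for the real users:
 *
 *      WAKE_Q(wake_q);
 *
 *      raw_spin_lock_irqsave(&sem->wait_lock, flags);
 *      if (!list_empty(&sem->wait_list))
 *              sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 *      raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 *      wake_up_q(&wake_q);     // the actual wakeups happen outside wait_lock
 */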
/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        WAKE_Q(wake_q);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);

        /* If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers!
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        /* wait to be given the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        __set_task_state(tsk, TASK_RUNNING);
        return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
        /*
         * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
         */
        if (count != RWSEM_WAITING_BIAS)
                return false;

        /*
         * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
         * are other tasks on the wait list, we need to add on WAITING_BIAS.
         */
        count = list_is_singular(&sem->wait_list) ?
                        RWSEM_ACTIVE_WRITE_BIAS :
                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

        if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
                                                        == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }
        return false;
}
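/*
 * For illustration (using the 32-bit values from the count guide at the top
 * of this file), the cmpxchg above performs one of two transitions:
 *
 *      we are the only waiter:         0xffff0000 -> 0xffff0001
 *                                      (WAITING_BIAS -> ACTIVE_WRITE_BIAS)
 *      other waiters stay queued:      0xffff0000 -> 0xfffe0001
 *                                      (WAITING_BIAS -> ACTIVE_WRITE_BIAS
 *                                                       + WAITING_BIAS)
 */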
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire the write lock before the writer has been put on the wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long old, count = atomic_long_read(&sem->count);

        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;

                old = atomic_long_cmpxchg_acquire(&sem->count, count,
                                      count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
                        return true;
                }

                count = old;
        }
}
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool ret = true;

        if (need_resched())
                return false;

        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
        if (!owner) {
                long count = atomic_long_read(&sem->count);
                /*
                 * If sem->owner is not set, yet we have just recently entered the
                 * slowpath with the lock being active, then there is a possibility
                 * reader(s) may have the lock. To be safe, bail spinning in these
                 * situations.
                 */
                if (count & RWSEM_ACTIVE_MASK)
                        ret = false;
                goto done;
        }

        ret = owner->on_cpu;
done:
        rcu_read_unlock();
        return ret;
}
static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
        long count;

        rcu_read_lock();
        while (sem->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking that sem->owner still matches owner. If that
                 * fails, owner might point to free()d memory; if it still
                 * matches, the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /* abort spinning when need_resched or owner is not running */
                if (!owner->on_cpu || need_resched()) {
                        rcu_read_unlock();
                        return false;
                }

                cpu_relax_lowlatency();
        }
        rcu_read_unlock();

        if (READ_ONCE(sem->owner))
                return true; /* new owner, continue spinning */

        /*
         * When the owner is not set, the lock could be free or
         * held by readers. Check the counter to verify the state.
         */
        count = atomic_long_read(&sem->count);
        return (count == 0 || count == RWSEM_WAITING_BIAS);
}
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool taken = false;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!rwsem_can_spin_on_owner(sem))
                goto done;

        if (!osq_lock(&sem->osq))
                goto done;

        while (true) {
                owner = READ_ONCE(sem->owner);
                if (owner && !rwsem_spin_on_owner(sem, owner))
                        break;

                /* wait_lock will be acquired if write_lock is obtained */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
                }

                /*
                 * When there's no owner, we might have been preempted between
                 * the owner acquiring the lock and setting the owner field.
                 * If we're an RT task, that will live-lock because we won't
                 * let the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(current)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax_lowlatency();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        return taken;
}
/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return false;
}
#endif
/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
        struct rw_semaphore *ret = sem;
        WAKE_Q(wake_q);

        /* undo write bias from down_write operation, stop active locking */
        count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);

        /* account for this before adding a new element to the list */
        if (list_empty(&sem->wait_list))
                waiting = false;

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
                count = atomic_long_read(&sem->count);

                /*
                 * If there were already threads queued before us and there are
                 * no active writers, the lock must be read owned; so we try to
                 * wake any read locks that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS) {
                        WAKE_Q(wake_q);

                        sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
                         * is released, but given that we are proactively waking
                         * readers we can deal with the wake_q overhead as it is
                         * similar to releasing and taking the wait_lock again
                         * for attempting rwsem_try_write_lock().
                         */
                        wake_up_q(&wake_q);
                }

        } else
                count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

        /* wait until we successfully acquire the lock */
        set_current_state(state);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        if (signal_pending_state(state, current))
                                goto out_nolock;

                        schedule();
                        set_current_state(state);
                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        return ret;

out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        else
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
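/*
 * Caller-side sketch of the killable variant handled above (illustrative
 * only; "my_sem" and the error handling are up to the caller):
 *
 *      if (down_write_killable(&my_sem))       // interrupted by fatal signal
 *              return -EINTR;
 *      ... modify data ...
 *      up_write(&my_sem);
 */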
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        WAKE_Q(wake_q);

        /*
         * If a spinner is present, it is not necessary to do the wakeup.
         * Try to do wakeup only if the trylock succeeds to minimize
         * spinlock contention which may introduce too much delay in the
         * unlock operation.
         *
         *    spinning writer           up_write/up_read caller
         *    ---------------           -----------------------
         * [S]   osq_unlock()           [L]   osq
         *       MB                           RMB
         * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
         *
         * Here, it is important to make sure that there won't be a missed
         * wakeup while the rwsem is free and the only spinning writer goes
         * to sleep without taking the rwsem. Even when the spinning writer
         * is just going to break out of the waiting loop, it will still do
         * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
         * rwsem_has_spinner() is true, it will guarantee at least one
         * trylock attempt on the rwsem later on.
         */
        if (rwsem_has_spinner(sem)) {
                /*
                 * The smp_rmb() here is to make sure that the spinner
                 * state is consulted before reading the wait_lock.
                 */
                smp_rmb();
                if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
                        return sem;
                goto locked;
        }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_wake);
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
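/*
 * For illustration, the typical downgrade pattern at the call site that ends
 * up in rwsem_downgrade_wake() when readers are queued ("my_sem" and
 * "my_data" are illustrative only):
 *
 *      down_write(&my_sem);
 *      ... publish my_data exclusively ...
 *      downgrade_write(&my_sem);       // keep reading, let other readers in
 *      ... continue reading my_data ...
 *      up_read(&my_sem);
 */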